hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
9518ca38c51b423f569917d7fae47c816167a173.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include"Graph.h" #include "service.h" #include"taskPath.h" #include"valuemark.h" #include"hiprand/hiprand_kernel.h" #include"iostream" #include <fstream> #include"const.h" #include<math.h> #include"BFS.h" #include"GAparrel.h" #include<time.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include"PathArrange.h" int checksum(int*array){ int sum = 0; for (int i = 0; i < Task; i++) sum += array[i]; return sum; } __device__ int Curand(unsigned int*seed, unsigned int offset, int*array) { unsigned long m = 31721474647; int a = array[(offset*(*seed))% 99999]; unsigned long x = (unsigned long)*seed; x = (a*x) % m; *seed = (unsigned int)a*x; return((int)a); } __global__ void RawChormes(int*chormes,int *pathnum,int*hops,unsigned int*seed,int*array,float*rawvalue,int*rawmark,float*demand){ int taskid=blockIdx.y; int popid=blockIdx.x*blockDim.x+threadIdx.x; if(popid>=pop||taskid>=Task) return; int choice=Curand(seed,taskid,array)%(pathnum[taskid]+1)-1; int Cid=popid*Task+taskid; chormes[Cid]=choice; rawvalue[Cid]=pow((float)demand[taskid],0.5)/hops[taskid*ROD+choice]; rawmark[Cid]=taskid; } __global__ void Cook(int*chormes,int*pathset,int pathd,float*popmcap,float*demand,int*rawmark){ int popid=blockIdx.x*blockDim.x+threadIdx.x; if(popid>=pop) return; for(int i=0;i<Task;i++) { int mi=rawmark[popid*Task+i]; int flag=0; int k=chormes[popid*Task + mi]; int dim = mi*ROD* pathd + k*pathd; int j=0; int e; while(true){ e=pathset[dim+j]; if(e<0) break; if(popmcap[popid*EDge+e]<demand[mi]) { flag=1; chormes[popid*Task + mi]=-1; break; } j++; } if(flag==0) { j=0; while(true) { e=pathset[dim+j]; if(e<0) break; popmcap[popid*EDge+e]-=demand[mi]; j++; } } } } __global__ void Fitor(int*hops,float*capacity,int*chormes,float*demand,int *pathset,int 
pathd,int*fits_key,float*fits_value){ int chonum = blockIdx.x; int threadid = threadIdx.x; int blockdim = blockDim.x; __shared__ float f[PERB]; //caculate load; for (int i = threadid; i <PERB; i += blockdim) f[i] = 0; __syncthreads(); for (int i = threadid; i < Task; i += blockdim) { int k = chormes[chonum*Task + i]; if (k>=0){ float deman = demand[i]; int j = 0; int e; int dim = i*ROD* pathd + k*pathd; while (true){ e = pathset[dim + j]; if (e < 0) break; atomicAdd(&f[e], deman); j++; } } } __syncthreads(); //if load is overflow demand =100*Task; for (int i = threadid; i <PERB; i += blockdim) { float deman = 0; if (i < Task) { int k = chormes[chonum*Task + i]; deman = (k<0) ?(INFHOPS*demand[i]):(hops[i*ROD+k]*demand[i]); } f[i] = (f[i]>capacity[i])?(deman+100*Task*INFHOPS):deman; } __syncthreads(); //pre add for reduce; if (PERB> (blockdim)) { for (int i = threadid+blockdim; i <PERB; i += blockdim) f[threadid] += f[i]; } __syncthreads(); //reduce add int size = (PERB<blockdim) ?PERB: blockdim; for (int s = size; s>1; s = (s + 1) / 2) { if (threadid<s/2) f[threadid] += f[threadid + (s + 1) / 2]; __syncthreads(); } //write to global memery if (threadid == 0){ fits_value[chonum] = f[0]; fits_key[chonum] =chonum; } } __global__ void GetParents(int* parents, int*randarray, unsigned int *seed){ int id = blockIdx.x*blockDim.x+threadIdx.x; if (id >= 2 * Beta) return; if (id < Beta) parents[id] = Curand(seed, id, randarray) % ALPHA; else parents[id] = Curand(seed, id, randarray) % (Beta+ ALPHA); } __global__ void CudaCross(int*children, int*chormes,int*fits_key,int*randarray,unsigned int *seed,int*parents){ unsigned int blockid = blockIdx.y; unsigned int threadid = threadIdx.x + blockIdx.x*blockDim.x; if (threadid >= Task) return; unsigned int position = blockid * 2; if (position + 1 >=Beta) return; int monther = parents[blockid]; int father = parents[blockid+ Beta]; int mask = Curand(seed,threadid,randarray) % 2; if (mask<1) { children[position*Task + threadid] = 
chormes[fits_key[father]*Task + threadid]; children[(position + 1)*Task + threadid] = chormes[fits_key[monther]*Task + threadid]; } else { children[position*Task + threadid] = chormes[fits_key[monther]*Task + threadid]; children[(position + 1)*Task + threadid] = chormes[fits_key[father]*Task + threadid]; } __syncthreads(); } __global__ void GetMu(int*muinfo,int*children, int*chormes,int*randarray, unsigned int *seed, int*pathnum){ int id = threadIdx.x + blockIdx.x*blockDim.x; if (id >=Gama) return; int muc = Curand(seed, id, randarray) % pop; int mup= Curand(seed, id * 13, randarray) % Task; int newv = Curand(seed, id * 71, randarray) % (pathnum[mup] + 1); if (newv == pathnum[mup]) newv = -1; muinfo[id * 3] = muc; muinfo[id * 3 + 1] = mup; muinfo[id * 3 + 2] = newv; } __global__ void CudaMutation(int*muinfo,int*children, int*chormes, int*pathnum){ int conum = blockIdx.y; int id = threadIdx.x + blockIdx.x*blockDim.x; if (id > Task) return; if (id == muinfo[3 * conum + 1]) children[(conum+Beta)*Task + id] = muinfo[3 * conum + 2]; else children[(conum + Beta)*Task + id] =chormes[muinfo[3 * conum] * Task + id]; } __global__ void Reload(int*chormes,int*children,int*fits_key){ int conum = blockIdx.y; int id = threadIdx.x + blockIdx.x*blockDim.x; if (conum>=(Beta+Gama)||id >=Task) return; chormes[fits_key[conum + ALPHA]*Task + id] = children[conum*Task + id]; } void NewGAParrel::cudamalloc(){ hipMalloc((void**)&dev_chormes, Task*pop*sizeof(int)); hipMalloc((void**)&dev_demand, Task*sizeof(float)); hipMalloc((void**)&dev_childs, Task*(Beta+Gama)*sizeof(int)); hipMalloc((void**)&dev_capacity, G.m*sizeof(float)); hipMalloc((void**)&dev_pathset, Task*taskd*sizeof(int)); hipMalloc((void**)(&dev_randarray), sizeof(int)*100000); hipMalloc((void**)(&dev_parents), sizeof(int)*Beta*2); hipMalloc((void**)(&dev_seed), sizeof(unsigned int)); hipMalloc((void**)(&dev_pathnum), sizeof(int)*Task); hipMalloc((void**)(&dev_muinfo), sizeof(int)*Gama*3); hipMalloc((void**)(&dev_fit_key), 
sizeof(int)*pop); hipMalloc((void**)(&dev_fit_value), sizeof(float)*pop); hipMalloc((void**)(&dev_hops), sizeof(int)*ROD*Task); hipMalloc((void**)(&dev_rawvalue), sizeof(float)*pop*Task); hipMalloc((void**)(&dev_rawmark), sizeof(int)*pop*Task); hipMalloc((void**)(&dev_popmcap), sizeof(float)*G.m*pop); hipMemcpy(dev_rawmark,rawmark, sizeof(int)*pop*Task, hipMemcpyHostToDevice); hipMemcpy(dev_rawvalue,rawvalue, sizeof(float)*pop*Task, hipMemcpyHostToDevice); hipMemcpy(dev_popmcap,popmcap, sizeof(float)*pop*G.m, hipMemcpyHostToDevice); hipMemcpy(dev_capacity,capacity, G.m*sizeof(float), hipMemcpyHostToDevice); } void NewGAParrel::cudapre(){ hipMemcpy(dev_chormes,chormes, Task*pop*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_demand,demand,Task*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_childs,childs,Task*(Beta+Gama)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_pathset,pathset,Task*taskd*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_randarray, randarray, sizeof(int)*100000, hipMemcpyHostToDevice); hipMemcpy(dev_parents,parents, sizeof(int)*Beta*2, hipMemcpyHostToDevice); hipMemcpy(dev_seed,seed, sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(dev_pathnum,pathnum, sizeof(int)*Task, hipMemcpyHostToDevice); hipMemcpy(dev_muinfo,muinfo, sizeof(int)*Gama*3, hipMemcpyHostToDevice); hipMemcpy(dev_fit_key,fit_key, sizeof(int)*pop, hipMemcpyHostToDevice); hipMemcpy(dev_fit_value,fit_value, sizeof(float)*pop, hipMemcpyHostToDevice); hipMemcpy(dev_hops,hops, sizeof(int)*ROD*Task, hipMemcpyHostToDevice); } void NewGAParrel::cudafree(){ hipFree(dev_chormes); hipFree(dev_demand); hipFree(dev_childs); hipFree(dev_capacity); hipFree(dev_pathset); hipFree(dev_randarray); hipFree(dev_parents); hipFree(dev_seed); hipFree(dev_pathnum); hipFree(dev_muinfo); hipFree(dev_fit_key); hipFree(dev_fit_value); hipFree(dev_hops); hipFree(dev_rawmark); hipFree(dev_rawvalue); hipFree(dev_popmcap); } void NewGAParrel::parrelmake(){ dim3 blocks_s(pop/512 + 1,Task); 
RawChormes<< <blocks_s,512>> >(dev_chormes,dev_pathnum,dev_hops,dev_seed,dev_randarray,dev_rawvalue,dev_rawmark,dev_demand); thrust::device_ptr<float> dev_rv(dev_rawvalue); thrust::device_ptr<int> dev_rm(dev_rawmark); for(int i=0;i<pop;i++) thrust::sort_by_key((dev_rv+i*Task),(dev_rv+(i+1)*Task) ,(dev_rm+i*Task),thrust::greater<float>()); hipMemcpy(rawmark,dev_rawmark, sizeof(int)*pop*Task, hipMemcpyDeviceToHost); Cook<< <pop,512>> >(dev_chormes,dev_pathset,pathd,dev_popmcap,dev_demand,dev_rawmark); } void NewGAParrel::process(){ } vector<pair<string,float> > NewGAParrel::GAsearch(){ cudamalloc(); cout<<"GA Parallel searching......."<<endl; float start=float(1000*clock())/ CLOCKS_PER_SEC; GoldnessMake(); cudapre(); best =Task*100*INFHOPS; int count = 0; int iter=0; thrust::device_ptr<float> dev_fv(dev_fit_value); thrust::device_ptr<int> dev_fk(dev_fit_key); vector<float>middata; int mkd=2; for (int i = 0; i <10000000; i++) { iter++; seed++; Fitor << <pop,1024 >> >(dev_hops,dev_capacity, dev_chormes, dev_demand, dev_pathset, pathd, dev_fit_key,dev_fit_value); hipMemcpy(fit_key,dev_fit_key, sizeof(int)*pop, hipMemcpyDeviceToHost); hipMemcpy(fit_value,dev_fit_value, sizeof(float)*pop, hipMemcpyDeviceToHost); /*for(int k=0;k<pop;k++) cout<<fit_value[k]<<" "; cout<<endl;*/ thrust::sort_by_key(dev_fv,dev_fv+pop ,dev_fk,thrust::less<float>()); hipMemcpy(fit_value,dev_fit_value, sizeof(float)*pop, hipMemcpyDeviceToHost); float ans=fit_value[0]; GetParents << <(Beta * 2 + 511) / 512, 512 >> >(dev_parents,dev_randarray,dev_seed); dim3 blocks_s(Task / 1024 + 1, Beta / 2 + 1); CudaCross << <blocks_s, 1024 >> >(dev_childs,dev_chormes,dev_fit_key,dev_randarray,dev_seed,dev_parents); GetMu << <Gama / 1024 + 1, 1024 >> >(dev_muinfo,dev_childs,dev_chormes,dev_randarray,dev_seed,dev_pathnum); dim3 blocks_s2(Task / 1024 + 1, Gama); CudaMutation << <blocks_s2, 1024 >> >(dev_muinfo, dev_childs, dev_chormes, dev_pathnum); dim3 blocks_s3(Task / 1024 + 1, Gama + Beta); Reload << 
<blocks_s3, 1024 >> >(dev_chormes, dev_childs, dev_fit_key); //cout<<ans<<" "<<best<<endl; if (ans<best) { mkd--; best = ans; count = 0; } else count++; middata.push_back(ans); if(mkd>0&&count<100) continue; time_t now=1000*clock()/ CLOCKS_PER_SEC; if (count>loomore||((now-start)>EXPIRE&&GANOEX<0)) break; } hipMemcpy(chormes,dev_chormes, Task*pop*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(fit_key,dev_fit_key, sizeof(int)*pop, hipMemcpyDeviceToHost); float end = float(1000*clock())/ CLOCKS_PER_SEC; pair<float,int>md=more(); vector<pair<string,float>>rdata; float lowbound=0; for(int i=0;i<Task;i++) lowbound+=demand[i]*INFHOPS; float gap=middata[middata.size()-1]-best; cout<<"gap is"<<gap<<endl; for(int i=0;i<middata.size();i++) middata[i]-=gap; CheckR(&G,Result,serv,string("GA_Paralle")); //cout<<"affier is:"<<affier<<endl; writejsoniter(GAPFILE,middata,string("GA_Paralle")); rdata.push_back(make_pair(string("object"),best)); rdata.push_back(make_pair(string("inf_obj"),lowbound)); rdata.push_back(make_pair(string("task_add_in"),md.second)); rdata.push_back(make_pair(string("flow_add_in"),md.first)); rdata.push_back(make_pair(string("total_weight"),totalweight)); rdata.push_back(make_pair(string("time"),(end-start)+affier)); rdata.push_back(make_pair(string("iter_num"),iter)); rdata.push_back(make_pair(string("iter_time"),float(end-start+affier)/iter)); rdata.push_back(make_pair(string("gap"),gap)); writejsondata(DATAFILE,rdata,string("GA_Paralle")); return rdata; }
9518ca38c51b423f569917d7fae47c816167a173.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include"Graph.h" #include "service.h" #include"taskPath.h" #include"valuemark.h" #include"curand_kernel.h" #include"iostream" #include <fstream> #include"const.h" #include<math.h> #include"BFS.h" #include"GAparrel.h" #include<time.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include"PathArrange.h" int checksum(int*array){ int sum = 0; for (int i = 0; i < Task; i++) sum += array[i]; return sum; } __device__ int Curand(unsigned int*seed, unsigned int offset, int*array) { unsigned long m = 31721474647; int a = array[(offset*(*seed))% 99999]; unsigned long x = (unsigned long)*seed; x = (a*x) % m; *seed = (unsigned int)a*x; return((int)a); } __global__ void RawChormes(int*chormes,int *pathnum,int*hops,unsigned int*seed,int*array,float*rawvalue,int*rawmark,float*demand){ int taskid=blockIdx.y; int popid=blockIdx.x*blockDim.x+threadIdx.x; if(popid>=pop||taskid>=Task) return; int choice=Curand(seed,taskid,array)%(pathnum[taskid]+1)-1; int Cid=popid*Task+taskid; chormes[Cid]=choice; rawvalue[Cid]=pow((float)demand[taskid],0.5)/hops[taskid*ROD+choice]; rawmark[Cid]=taskid; } __global__ void Cook(int*chormes,int*pathset,int pathd,float*popmcap,float*demand,int*rawmark){ int popid=blockIdx.x*blockDim.x+threadIdx.x; if(popid>=pop) return; for(int i=0;i<Task;i++) { int mi=rawmark[popid*Task+i]; int flag=0; int k=chormes[popid*Task + mi]; int dim = mi*ROD* pathd + k*pathd; int j=0; int e; while(true){ e=pathset[dim+j]; if(e<0) break; if(popmcap[popid*EDge+e]<demand[mi]) { flag=1; chormes[popid*Task + mi]=-1; break; } j++; } if(flag==0) { j=0; while(true) { e=pathset[dim+j]; if(e<0) break; popmcap[popid*EDge+e]-=demand[mi]; j++; } } } } __global__ void Fitor(int*hops,float*capacity,int*chormes,float*demand,int *pathset,int pathd,int*fits_key,float*fits_value){ int chonum = blockIdx.x; int threadid = threadIdx.x; int 
blockdim = blockDim.x; __shared__ float f[PERB]; //caculate load; for (int i = threadid; i <PERB; i += blockdim) f[i] = 0; __syncthreads(); for (int i = threadid; i < Task; i += blockdim) { int k = chormes[chonum*Task + i]; if (k>=0){ float deman = demand[i]; int j = 0; int e; int dim = i*ROD* pathd + k*pathd; while (true){ e = pathset[dim + j]; if (e < 0) break; atomicAdd(&f[e], deman); j++; } } } __syncthreads(); //if load is overflow demand =100*Task; for (int i = threadid; i <PERB; i += blockdim) { float deman = 0; if (i < Task) { int k = chormes[chonum*Task + i]; deman = (k<0) ?(INFHOPS*demand[i]):(hops[i*ROD+k]*demand[i]); } f[i] = (f[i]>capacity[i])?(deman+100*Task*INFHOPS):deman; } __syncthreads(); //pre add for reduce; if (PERB> (blockdim)) { for (int i = threadid+blockdim; i <PERB; i += blockdim) f[threadid] += f[i]; } __syncthreads(); //reduce add int size = (PERB<blockdim) ?PERB: blockdim; for (int s = size; s>1; s = (s + 1) / 2) { if (threadid<s/2) f[threadid] += f[threadid + (s + 1) / 2]; __syncthreads(); } //write to global memery if (threadid == 0){ fits_value[chonum] = f[0]; fits_key[chonum] =chonum; } } __global__ void GetParents(int* parents, int*randarray, unsigned int *seed){ int id = blockIdx.x*blockDim.x+threadIdx.x; if (id >= 2 * Beta) return; if (id < Beta) parents[id] = Curand(seed, id, randarray) % ALPHA; else parents[id] = Curand(seed, id, randarray) % (Beta+ ALPHA); } __global__ void CudaCross(int*children, int*chormes,int*fits_key,int*randarray,unsigned int *seed,int*parents){ unsigned int blockid = blockIdx.y; unsigned int threadid = threadIdx.x + blockIdx.x*blockDim.x; if (threadid >= Task) return; unsigned int position = blockid * 2; if (position + 1 >=Beta) return; int monther = parents[blockid]; int father = parents[blockid+ Beta]; int mask = Curand(seed,threadid,randarray) % 2; if (mask<1) { children[position*Task + threadid] = chormes[fits_key[father]*Task + threadid]; children[(position + 1)*Task + threadid] = 
chormes[fits_key[monther]*Task + threadid]; } else { children[position*Task + threadid] = chormes[fits_key[monther]*Task + threadid]; children[(position + 1)*Task + threadid] = chormes[fits_key[father]*Task + threadid]; } __syncthreads(); } __global__ void GetMu(int*muinfo,int*children, int*chormes,int*randarray, unsigned int *seed, int*pathnum){ int id = threadIdx.x + blockIdx.x*blockDim.x; if (id >=Gama) return; int muc = Curand(seed, id, randarray) % pop; int mup= Curand(seed, id * 13, randarray) % Task; int newv = Curand(seed, id * 71, randarray) % (pathnum[mup] + 1); if (newv == pathnum[mup]) newv = -1; muinfo[id * 3] = muc; muinfo[id * 3 + 1] = mup; muinfo[id * 3 + 2] = newv; } __global__ void CudaMutation(int*muinfo,int*children, int*chormes, int*pathnum){ int conum = blockIdx.y; int id = threadIdx.x + blockIdx.x*blockDim.x; if (id > Task) return; if (id == muinfo[3 * conum + 1]) children[(conum+Beta)*Task + id] = muinfo[3 * conum + 2]; else children[(conum + Beta)*Task + id] =chormes[muinfo[3 * conum] * Task + id]; } __global__ void Reload(int*chormes,int*children,int*fits_key){ int conum = blockIdx.y; int id = threadIdx.x + blockIdx.x*blockDim.x; if (conum>=(Beta+Gama)||id >=Task) return; chormes[fits_key[conum + ALPHA]*Task + id] = children[conum*Task + id]; } void NewGAParrel::cudamalloc(){ cudaMalloc((void**)&dev_chormes, Task*pop*sizeof(int)); cudaMalloc((void**)&dev_demand, Task*sizeof(float)); cudaMalloc((void**)&dev_childs, Task*(Beta+Gama)*sizeof(int)); cudaMalloc((void**)&dev_capacity, G.m*sizeof(float)); cudaMalloc((void**)&dev_pathset, Task*taskd*sizeof(int)); cudaMalloc((void**)(&dev_randarray), sizeof(int)*100000); cudaMalloc((void**)(&dev_parents), sizeof(int)*Beta*2); cudaMalloc((void**)(&dev_seed), sizeof(unsigned int)); cudaMalloc((void**)(&dev_pathnum), sizeof(int)*Task); cudaMalloc((void**)(&dev_muinfo), sizeof(int)*Gama*3); cudaMalloc((void**)(&dev_fit_key), sizeof(int)*pop); cudaMalloc((void**)(&dev_fit_value), sizeof(float)*pop); 
cudaMalloc((void**)(&dev_hops), sizeof(int)*ROD*Task); cudaMalloc((void**)(&dev_rawvalue), sizeof(float)*pop*Task); cudaMalloc((void**)(&dev_rawmark), sizeof(int)*pop*Task); cudaMalloc((void**)(&dev_popmcap), sizeof(float)*G.m*pop); cudaMemcpy(dev_rawmark,rawmark, sizeof(int)*pop*Task, cudaMemcpyHostToDevice); cudaMemcpy(dev_rawvalue,rawvalue, sizeof(float)*pop*Task, cudaMemcpyHostToDevice); cudaMemcpy(dev_popmcap,popmcap, sizeof(float)*pop*G.m, cudaMemcpyHostToDevice); cudaMemcpy(dev_capacity,capacity, G.m*sizeof(float), cudaMemcpyHostToDevice); } void NewGAParrel::cudapre(){ cudaMemcpy(dev_chormes,chormes, Task*pop*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_demand,demand,Task*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_childs,childs,Task*(Beta+Gama)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_pathset,pathset,Task*taskd*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_randarray, randarray, sizeof(int)*100000, cudaMemcpyHostToDevice); cudaMemcpy(dev_parents,parents, sizeof(int)*Beta*2, cudaMemcpyHostToDevice); cudaMemcpy(dev_seed,seed, sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(dev_pathnum,pathnum, sizeof(int)*Task, cudaMemcpyHostToDevice); cudaMemcpy(dev_muinfo,muinfo, sizeof(int)*Gama*3, cudaMemcpyHostToDevice); cudaMemcpy(dev_fit_key,fit_key, sizeof(int)*pop, cudaMemcpyHostToDevice); cudaMemcpy(dev_fit_value,fit_value, sizeof(float)*pop, cudaMemcpyHostToDevice); cudaMemcpy(dev_hops,hops, sizeof(int)*ROD*Task, cudaMemcpyHostToDevice); } void NewGAParrel::cudafree(){ cudaFree(dev_chormes); cudaFree(dev_demand); cudaFree(dev_childs); cudaFree(dev_capacity); cudaFree(dev_pathset); cudaFree(dev_randarray); cudaFree(dev_parents); cudaFree(dev_seed); cudaFree(dev_pathnum); cudaFree(dev_muinfo); cudaFree(dev_fit_key); cudaFree(dev_fit_value); cudaFree(dev_hops); cudaFree(dev_rawmark); cudaFree(dev_rawvalue); cudaFree(dev_popmcap); } void NewGAParrel::parrelmake(){ dim3 blocks_s(pop/512 + 1,Task); RawChormes<< 
<blocks_s,512>> >(dev_chormes,dev_pathnum,dev_hops,dev_seed,dev_randarray,dev_rawvalue,dev_rawmark,dev_demand); thrust::device_ptr<float> dev_rv(dev_rawvalue); thrust::device_ptr<int> dev_rm(dev_rawmark); for(int i=0;i<pop;i++) thrust::sort_by_key((dev_rv+i*Task),(dev_rv+(i+1)*Task) ,(dev_rm+i*Task),thrust::greater<float>()); cudaMemcpy(rawmark,dev_rawmark, sizeof(int)*pop*Task, cudaMemcpyDeviceToHost); Cook<< <pop,512>> >(dev_chormes,dev_pathset,pathd,dev_popmcap,dev_demand,dev_rawmark); } void NewGAParrel::process(){ } vector<pair<string,float> > NewGAParrel::GAsearch(){ cudamalloc(); cout<<"GA Parallel searching......."<<endl; float start=float(1000*clock())/ CLOCKS_PER_SEC; GoldnessMake(); cudapre(); best =Task*100*INFHOPS; int count = 0; int iter=0; thrust::device_ptr<float> dev_fv(dev_fit_value); thrust::device_ptr<int> dev_fk(dev_fit_key); vector<float>middata; int mkd=2; for (int i = 0; i <10000000; i++) { iter++; seed++; Fitor << <pop,1024 >> >(dev_hops,dev_capacity, dev_chormes, dev_demand, dev_pathset, pathd, dev_fit_key,dev_fit_value); cudaMemcpy(fit_key,dev_fit_key, sizeof(int)*pop, cudaMemcpyDeviceToHost); cudaMemcpy(fit_value,dev_fit_value, sizeof(float)*pop, cudaMemcpyDeviceToHost); /*for(int k=0;k<pop;k++) cout<<fit_value[k]<<" "; cout<<endl;*/ thrust::sort_by_key(dev_fv,dev_fv+pop ,dev_fk,thrust::less<float>()); cudaMemcpy(fit_value,dev_fit_value, sizeof(float)*pop, cudaMemcpyDeviceToHost); float ans=fit_value[0]; GetParents << <(Beta * 2 + 511) / 512, 512 >> >(dev_parents,dev_randarray,dev_seed); dim3 blocks_s(Task / 1024 + 1, Beta / 2 + 1); CudaCross << <blocks_s, 1024 >> >(dev_childs,dev_chormes,dev_fit_key,dev_randarray,dev_seed,dev_parents); GetMu << <Gama / 1024 + 1, 1024 >> >(dev_muinfo,dev_childs,dev_chormes,dev_randarray,dev_seed,dev_pathnum); dim3 blocks_s2(Task / 1024 + 1, Gama); CudaMutation << <blocks_s2, 1024 >> >(dev_muinfo, dev_childs, dev_chormes, dev_pathnum); dim3 blocks_s3(Task / 1024 + 1, Gama + Beta); Reload << <blocks_s3, 
1024 >> >(dev_chormes, dev_childs, dev_fit_key); //cout<<ans<<" "<<best<<endl; if (ans<best) { mkd--; best = ans; count = 0; } else count++; middata.push_back(ans); if(mkd>0&&count<100) continue; time_t now=1000*clock()/ CLOCKS_PER_SEC; if (count>loomore||((now-start)>EXPIRE&&GANOEX<0)) break; } cudaMemcpy(chormes,dev_chormes, Task*pop*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(fit_key,dev_fit_key, sizeof(int)*pop, cudaMemcpyDeviceToHost); float end = float(1000*clock())/ CLOCKS_PER_SEC; pair<float,int>md=more(); vector<pair<string,float>>rdata; float lowbound=0; for(int i=0;i<Task;i++) lowbound+=demand[i]*INFHOPS; float gap=middata[middata.size()-1]-best; cout<<"gap is"<<gap<<endl; for(int i=0;i<middata.size();i++) middata[i]-=gap; CheckR(&G,Result,serv,string("GA_Paralle")); //cout<<"affier is:"<<affier<<endl; writejsoniter(GAPFILE,middata,string("GA_Paralle")); rdata.push_back(make_pair(string("object"),best)); rdata.push_back(make_pair(string("inf_obj"),lowbound)); rdata.push_back(make_pair(string("task_add_in"),md.second)); rdata.push_back(make_pair(string("flow_add_in"),md.first)); rdata.push_back(make_pair(string("total_weight"),totalweight)); rdata.push_back(make_pair(string("time"),(end-start)+affier)); rdata.push_back(make_pair(string("iter_num"),iter)); rdata.push_back(make_pair(string("iter_time"),float(end-start+affier)/iter)); rdata.push_back(make_pair(string("gap"),gap)); writejsondata(DATAFILE,rdata,string("GA_Paralle")); return rdata; }
fb7e2447b2a77af2a7be7ca16788cdbadf2b2d0e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "linalg/add.cuh" #include "linalg/subtract.cuh" #include "linalg/unary_op.cuh" #include "random/rng.cuh" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T, typename IdxType = int> struct DevScalarInputs { T tolerance; IdxType len; T scalar; bool add; unsigned long long int seed; }; // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T, typename IdxType = int> void unaryOpLaunch(T *out, const T *in, T scalar, IdxType len, bool add, hipStream_t stream) { unaryOp( out, in, len, [scalar, add] __device__(T in) { return add ? 
in + scalar : in - scalar; }, stream); } template <typename T, typename IdxType> class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> { protected: void SetUp() override { params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam(); Random::Rng r(params.seed); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); auto len = params.len; allocate(in, len); allocate(out_ref, len); allocate(out, len); allocate(scalar, (size_t)1); updateDevice(scalar, &params.scalar, 1, stream); r.uniform(in, len, T(-1.0), T(1.0), stream); unaryOpLaunch(out_ref, in, params.scalar, len, params.add, stream); if (params.add) { addDevScalar(out, in, scalar, len, stream); } else { subtractDevScalar(out, in, scalar, len, stream); } CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); CUDA_CHECK(hipFree(scalar)); } protected: DevScalarInputs<T, IdxType> params; T *in, *out_ref, *out, *scalar; }; const std::vector<DevScalarInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, int> DevScalarTestF_i32; TEST_P(DevScalarTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, size_t> DevScalarTestF_i64; TEST_P(DevScalarTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64)); const std::vector<DevScalarInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 2.0, 
true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, int> DevScalarTestD_i32; TEST_P(DevScalarTestD_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, size_t> DevScalarTestD_i64; TEST_P(DevScalarTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // end namespace LinAlg } // end namespace MLCommon
fb7e2447b2a77af2a7be7ca16788cdbadf2b2d0e.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include "linalg/add.cuh" #include "linalg/subtract.cuh" #include "linalg/unary_op.cuh" #include "random/rng.cuh" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T, typename IdxType = int> struct DevScalarInputs { T tolerance; IdxType len; T scalar; bool add; unsigned long long int seed; }; // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename T, typename IdxType = int> void unaryOpLaunch(T *out, const T *in, T scalar, IdxType len, bool add, cudaStream_t stream) { unaryOp( out, in, len, [scalar, add] __device__(T in) { return add ? 
in + scalar : in - scalar; }, stream); } template <typename T, typename IdxType> class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> { protected: void SetUp() override { params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam(); Random::Rng r(params.seed); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); auto len = params.len; allocate(in, len); allocate(out_ref, len); allocate(out, len); allocate(scalar, (size_t)1); updateDevice(scalar, &params.scalar, 1, stream); r.uniform(in, len, T(-1.0), T(1.0), stream); unaryOpLaunch(out_ref, in, params.scalar, len, params.add, stream); if (params.add) { addDevScalar(out, in, scalar, len, stream); } else { subtractDevScalar(out, in, scalar, len, stream); } CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); CUDA_CHECK(cudaFree(scalar)); } protected: DevScalarInputs<T, IdxType> params; T *in, *out_ref, *out, *scalar; }; const std::vector<DevScalarInputs<float, int>> inputsf_i32 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, int> DevScalarTestF_i32; TEST_P(DevScalarTestF_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32)); const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = { {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}}; typedef DevScalarTest<float, size_t> DevScalarTestF_i64; TEST_P(DevScalarTestF_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64)); const std::vector<DevScalarInputs<double, int>> inputsd_i32 = { {0.00000001, 1024 * 1024, 
2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, int> DevScalarTestD_i32; TEST_P(DevScalarTestD_i32, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32)); const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = { {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}}; typedef DevScalarTest<double, size_t> DevScalarTestD_i64; TEST_P(DevScalarTestD_i64, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64)); } // end namespace LinAlg } // end namespace MLCommon
66592e3eec37461e0a13186e6050958af8f82dde.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuspatial_test/base_fixture.hpp> #include <cuspatial/error.hpp> #include <cuspatial/trajectory.hpp> #include <cudf_test/column_utilities.hpp> #include <rmm/device_uvector.hpp> struct TrajectoryDistanceSpeedErrorTest : public cuspatial::test::BaseFixture {}; TEST_F(TrajectoryDistanceSpeedErrorTest, SizeMismatch) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, 
rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryDistanceSpeedErrorTest, TypeError) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not integer auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not timestamp EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { // x-y type mismatch 
auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<double>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryDistanceSpeedErrorTest, Nulls) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto nulls = rmm::device_uvector<int>(1000, rmm::cuda_stream_default); hipMemsetAsync(nulls.data(), 0xcccc, nulls.size(), rmm::cuda_stream_default.value()); auto nulls_buffer = nulls.release(); id.set_null_mask(nulls_buffer, 4000); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } }
66592e3eec37461e0a13186e6050958af8f82dde.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuspatial_test/base_fixture.hpp> #include <cuspatial/error.hpp> #include <cuspatial/trajectory.hpp> #include <cudf_test/column_utilities.hpp> #include <rmm/device_uvector.hpp> struct TrajectoryDistanceSpeedErrorTest : public cuspatial::test::BaseFixture {}; TEST_F(TrajectoryDistanceSpeedErrorTest, SizeMismatch) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); 
EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryDistanceSpeedErrorTest, TypeError) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not integer auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not timestamp EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } { // x-y type mismatch auto id = 
cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<double>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryDistanceSpeedErrorTest, Nulls) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ts = cudf::column(rmm::device_uvector<cudf::timestamp_ms>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto nulls = rmm::device_uvector<int>(1000, rmm::cuda_stream_default); cudaMemsetAsync(nulls.data(), 0xcccc, nulls.size(), rmm::cuda_stream_default.value()); auto nulls_buffer = nulls.release(); id.set_null_mask(nulls_buffer, 4000); EXPECT_THROW(cuspatial::trajectory_distances_and_speeds(1, id, xs, ys, ts, this->mr()), cuspatial::logic_error); } }
16d9da4d64c46a6cfbcad1b54c3dab47c9d4e2b7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define MAX 9 #define MAX_RAND_TRIES 100 int ROWS = 9; int COLUMNS = 9; char* FILENAME; //char* concat(char *s1, char *s2); void printGridToFile(int* board); void startSeq(char* name); int main ( int argc, char *argv[] ) { // Read in file if (argc < 2) { printf("Sorry, we need a command line argument with the sudoku puzzle to solve"); exit(0); } else { char* name; name = strtok(argv[1], "."); FILENAME = name; startSeq(FILENAME); } } void printGrid(int* board) { int copyGrid[ROWS][COLUMNS]; memcpy(copyGrid, board, sizeof(int) * ROWS * COLUMNS); printf("\n"); for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { printf("%d ", copyGrid[row][column]); } printf("\n"); } printf("\n"); } void extractNumbers(char* fileName, int* grid) { FILE *input; input = fopen(fileName, "r"); char inp; for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { fscanf(input," %c", &inp); int number = inp - '0'; grid[row * COLUMNS + column] = number; } } fclose(input); } __device__ void d_swap (int *a, int *b) { int temp = *a; *a = *b; *b = temp; } __device__ void d_randomize(int nineArray[], hiprandState_t state) { int tryValid = hiprand(&state) % MAX; for (int i = 8; i > 0; i--) { int j = hiprand(&state) % (i+1); d_swap(&nineArray[i], &nineArray[j]); } } __device__ int d_numberPlacementValid(int numberToCheck, int checkingRow, int checkingColumn, int board[MAX][MAX]) { // Check if number to check exists in Column int boardValue = 0; for (int row = 0; row < MAX; row++) { boardValue = board[row][checkingColumn]; if (boardValue == numberToCheck) { return 0; } } // Check if number to check exists in Row for (int column = 0; column < MAX; column++) { boardValue = 
board[checkingRow][column]; if (boardValue == numberToCheck) { return 0; } } // Check if exists in 3 x 3 grid int rowGrid = checkingRow / 3; int columnGrid = checkingColumn / 3; for (int rowAdd = 0; rowAdd < 3; rowAdd++) { for (int colAdd = 0; colAdd < 3; colAdd++) { int rowValue = (rowGrid * 3) + rowAdd; int colValue = (columnGrid * 3) + colAdd; boardValue = board[rowValue][colValue]; if (boardValue == numberToCheck) { return 0; } } } return 1; } __global__ void replaceZeros(int* d_sudoku, int* d_sudoku_solution, int timeCalled) { __shared__ int shared_sudoku[9][9]; int thread_x = threadIdx.x; int thread_y = threadIdx.y; int blockId = blockIdx.x + blockIdx.y *gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; shared_sudoku[thread_x][thread_y] = d_sudoku[thread_x+ 9*thread_y]; // Synch threads to synch shared data __syncthreads(); hiprandState_t state; hiprand_init(threadId, gridDim.y/2, timeCalled, &state); // Create thread individual sudoku board int local_sudoku[9][9]; for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { local_sudoku[row][col] = shared_sudoku[row][col]; } } // For each element, try to random a value that is valid for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { if (local_sudoku[row][col] == 0) { int insertNum = 0; int nineArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; d_randomize(nineArray, state); for (int i = 0; i < 9; i++) { if (d_numberPlacementValid(nineArray[i], row, col, local_sudoku)) { insertNum = nineArray[i]; break; } } if (insertNum == 0) { return; } else { local_sudoku[row][col] = insertNum; } } } } // Only get here with solved sudoku puzzle // Placing values in solution for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { d_sudoku_solution[row * 9 + col] = local_sudoku[row][col]; } } } void printGridToFile(int* board) { char* extension = ".sol"; int stringSize = strlen(FILENAME) + 3; char outputFileName[stringSize]; 
strcpy(outputFileName, FILENAME); strcpy(outputFileName, extension); //char* outputFileName = concat(FILENAME, extension); //printf("Output name is %s", outputFileName); FILE *ofp; ofp = fopen(outputFileName, "w"); int copyGrid[ROWS][COLUMNS]; memcpy(copyGrid, board, sizeof(int) * ROWS * COLUMNS); //printf("\n"); for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { //printf("%d ", board[row * COLUMNS + column]); fprintf(ofp, "%d ", copyGrid[row][column]); } fprintf(ofp, "\n"); } fclose(ofp); } void startSeq(char* name) { int originalGrid[ROWS][COLUMNS]; FILENAME = name; extractNumbers(name, &originalGrid[0][0]); //printGrid(&originalGrid[0][0]); int sudokuSize = sizeof(int) * 81; int *d_sudoku; int *sudoku; int *d_sudoku_solution; int *sudoku_solution; hipHostMalloc((void**)&sudoku, sudokuSize, hipHostMallocDefault); hipHostMalloc((void**)&sudoku_solution, sudokuSize, hipHostMallocDefault); for (int row = 0; row < ROWS; row++) { for (int col = 0; col < COLUMNS; col++) { sudoku[row * 9 + col] = originalGrid[row][col]; sudoku_solution[row * 9 + col] = 0; } } dim3 dimGrid(12, 15); dim3 dimBlock(9, 9); hipMalloc((void**)&d_sudoku, sudokuSize); hipMalloc((void**)&d_sudoku_solution, sudokuSize); hipMemcpy(d_sudoku, sudoku, sudokuSize, hipMemcpyHostToDevice); hipLaunchKernelGGL(( replaceZeros), dim3(dimGrid),dim3(dimBlock), 0, 0, d_sudoku, d_sudoku_solution, 0); hipMemcpy(sudoku_solution, d_sudoku_solution, sudokuSize, hipMemcpyDeviceToHost); hipFree(d_sudoku); hipFree(d_sudoku_solution); //printf("We found solution"); //printGrid(sudoku_solution); printGridToFile(sudoku_solution); }
16d9da4d64c46a6cfbcad1b54c3dab47c9d4e2b7.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cuda.h> #include <cuda_profiler_api.h> #include <curand.h> #include <curand_kernel.h> #define MAX 9 #define MAX_RAND_TRIES 100 int ROWS = 9; int COLUMNS = 9; char* FILENAME; //char* concat(char *s1, char *s2); void printGridToFile(int* board); void startSeq(char* name); int main ( int argc, char *argv[] ) { // Read in file if (argc < 2) { printf("Sorry, we need a command line argument with the sudoku puzzle to solve"); exit(0); } else { char* name; name = strtok(argv[1], "."); FILENAME = name; startSeq(FILENAME); } } void printGrid(int* board) { int copyGrid[ROWS][COLUMNS]; memcpy(copyGrid, board, sizeof(int) * ROWS * COLUMNS); printf("\n"); for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { printf("%d ", copyGrid[row][column]); } printf("\n"); } printf("\n"); } void extractNumbers(char* fileName, int* grid) { FILE *input; input = fopen(fileName, "r"); char inp; for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { fscanf(input," %c", &inp); int number = inp - '0'; grid[row * COLUMNS + column] = number; } } fclose(input); } __device__ void d_swap (int *a, int *b) { int temp = *a; *a = *b; *b = temp; } __device__ void d_randomize(int nineArray[], curandState_t state) { int tryValid = curand(&state) % MAX; for (int i = 8; i > 0; i--) { int j = curand(&state) % (i+1); d_swap(&nineArray[i], &nineArray[j]); } } __device__ int d_numberPlacementValid(int numberToCheck, int checkingRow, int checkingColumn, int board[MAX][MAX]) { // Check if number to check exists in Column int boardValue = 0; for (int row = 0; row < MAX; row++) { boardValue = board[row][checkingColumn]; if (boardValue == numberToCheck) { return 0; } } // Check if number to check exists in Row for (int column = 0; column < MAX; column++) { boardValue = board[checkingRow][column]; if (boardValue == numberToCheck) { return 0; } } // Check if exists 
in 3 x 3 grid int rowGrid = checkingRow / 3; int columnGrid = checkingColumn / 3; for (int rowAdd = 0; rowAdd < 3; rowAdd++) { for (int colAdd = 0; colAdd < 3; colAdd++) { int rowValue = (rowGrid * 3) + rowAdd; int colValue = (columnGrid * 3) + colAdd; boardValue = board[rowValue][colValue]; if (boardValue == numberToCheck) { return 0; } } } return 1; } __global__ void replaceZeros(int* d_sudoku, int* d_sudoku_solution, int timeCalled) { __shared__ int shared_sudoku[9][9]; int thread_x = threadIdx.x; int thread_y = threadIdx.y; int blockId = blockIdx.x + blockIdx.y *gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; shared_sudoku[thread_x][thread_y] = d_sudoku[thread_x+ 9*thread_y]; // Synch threads to synch shared data __syncthreads(); curandState_t state; curand_init(threadId, gridDim.y/2, timeCalled, &state); // Create thread individual sudoku board int local_sudoku[9][9]; for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { local_sudoku[row][col] = shared_sudoku[row][col]; } } // For each element, try to random a value that is valid for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { if (local_sudoku[row][col] == 0) { int insertNum = 0; int nineArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9}; d_randomize(nineArray, state); for (int i = 0; i < 9; i++) { if (d_numberPlacementValid(nineArray[i], row, col, local_sudoku)) { insertNum = nineArray[i]; break; } } if (insertNum == 0) { return; } else { local_sudoku[row][col] = insertNum; } } } } // Only get here with solved sudoku puzzle // Placing values in solution for (int row = 0; row < 9; row++) { for (int col = 0; col < 9; col++) { d_sudoku_solution[row * 9 + col] = local_sudoku[row][col]; } } } void printGridToFile(int* board) { char* extension = ".sol"; int stringSize = strlen(FILENAME) + 3; char outputFileName[stringSize]; strcpy(outputFileName, FILENAME); strcpy(outputFileName, extension); //char* outputFileName = 
concat(FILENAME, extension); //printf("Output name is %s", outputFileName); FILE *ofp; ofp = fopen(outputFileName, "w"); int copyGrid[ROWS][COLUMNS]; memcpy(copyGrid, board, sizeof(int) * ROWS * COLUMNS); //printf("\n"); for (int row = 0; row < ROWS; row++) { for (int column = 0; column < COLUMNS; column++) { //printf("%d ", board[row * COLUMNS + column]); fprintf(ofp, "%d ", copyGrid[row][column]); } fprintf(ofp, "\n"); } fclose(ofp); } void startSeq(char* name) { int originalGrid[ROWS][COLUMNS]; FILENAME = name; extractNumbers(name, &originalGrid[0][0]); //printGrid(&originalGrid[0][0]); int sudokuSize = sizeof(int) * 81; int *d_sudoku; int *sudoku; int *d_sudoku_solution; int *sudoku_solution; cudaHostAlloc((void**)&sudoku, sudokuSize, cudaHostAllocDefault); cudaHostAlloc((void**)&sudoku_solution, sudokuSize, cudaHostAllocDefault); for (int row = 0; row < ROWS; row++) { for (int col = 0; col < COLUMNS; col++) { sudoku[row * 9 + col] = originalGrid[row][col]; sudoku_solution[row * 9 + col] = 0; } } dim3 dimGrid(12, 15); dim3 dimBlock(9, 9); cudaMalloc((void**)&d_sudoku, sudokuSize); cudaMalloc((void**)&d_sudoku_solution, sudokuSize); cudaMemcpy(d_sudoku, sudoku, sudokuSize, cudaMemcpyHostToDevice); replaceZeros<<<dimGrid,dimBlock>>>(d_sudoku, d_sudoku_solution, 0); cudaMemcpy(sudoku_solution, d_sudoku_solution, sudokuSize, cudaMemcpyDeviceToHost); cudaFree(d_sudoku); cudaFree(d_sudoku_solution); //printf("We found solution"); //printGrid(sudoku_solution); printGridToFile(sudoku_solution); }
ad7b71bae9dda13cf2178127e8fa01d155efed60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <string> #include "paddle/fluid/operators/interpolate_v2_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { using framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> __global__ void KeNearestNeighborInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * 
num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T out_pos = out[out_id_h * output_w + out_id_w]; platform::CudaAtomicAdd(in_pos, out_pos); } } template <typename T> __global__ void KeLinearInterpFw(const T* in, const size_t in_img_w, const size_t input_w, T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id]; } else { const T* in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]; } } } template <typename T> __global__ void KeLinearInterpBw(T* in, const size_t in_img_w, const size_t input_w, const T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? 
in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = 
// Body of KeBilinearInterpBw (header on the previous line): recomputes the
// same source coordinates/lambdas as the forward pass, then atomically adds
// the four bilinear-weighted gradient contributions into the input gradient.
// NOTE(review): `in_img_idy`/`in_img_idx` here rely on implicit float->int
// truncation (the forward kernel uses explicit static_cast<int>); same
// numeric result, just inconsistent style — worth unifying upstream.
(align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } }
// KeTrilinearInterpFw: trilinear (3-D) forward kernel; decodes (t, y, x,
// channel) from the flat output index for NCDHW vs NDHWC, then interpolates
// across 8 neighbors. Body continues on the following lines.
template <typename T> __global__ void KeTrilinearInterpFw( const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = 
// Body of KeTrilinearInterpFw (header on the previous line): per-axis source
// index (in_img_idt/idy/idx), neighbor offsets (d_id/h_id/w_id, zeroed at the
// far boundary), and interpolation weights (d/h/w lambdas) computed with the
// same align_flag half-pixel convention as the 2-D kernels.
output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
// in_pos1 = front depth slice, in_pos2 = back slice (d_id offset); output is
// the depth-weighted blend of two bilinear interpolations.
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation
out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation
out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } }
// KeTrilinearInterpBw: trilinear backward (gradient scatter via atomics);
// body continues on the following lines.
template <typename T> __global__ void KeTrilinearInterpBw( T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, 
// Body of KeTrilinearInterpBw (header on the previous line): mirrors the
// forward index/lambda computation, then atomically scatters the 8 weighted
// gradient contributions (2 depth slices x 4 bilinear corners) into `in`.
const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? 
static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad
platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id], d1lambda * h1lambda * w1lambda * out_pos[0]); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad
platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); 
platform::CudaAtomicAdd(&in_pos1[w_id * num_channels], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id * num_channels], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels], d1lambda * h1lambda * w1lambda * out_pos[0]); } } }
// Kecubic_interp: 1-D cubic (Catmull-Rom-style, a = -0.75) interpolation of
// four samples x0..x3 at parameter t in [0, 1]. Relies on the
// cubic_convolution1/2 helpers defined elsewhere in this file.
// NOTE(review): double literals (1.0, -0.75) in a T-typed device helper —
// promotes to double math when T is float; consider f-suffixed literals.
template <typename T> __device__ __forceinline__ static T Kecubic_interp(const T x0, const T x1, const T x2, const T x3, T t) { T coeffs[4]; T a = -0.75; T x_1 = t; T x_2 = 1.0 - t; coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a); coeffs[1] = cubic_convolution1<T>(x_1, a); coeffs[2] = cubic_convolution1<T>(x_2, a); coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; }
// KeBicubicInterpFw: bicubic forward kernel (4x4 neighborhood, edge-clamped);
// body continues on the following lines. No align_mode here — bicubic only
// distinguishes align_corners.
template <typename T> __global__ void KeBicubicInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == 
// Body of KeBicubicInterpFw (header on the previous line): for each output
// pixel, clamp a 4x4 input window to the image bounds, cubic-interpolate each
// of the 4 rows in x (coefficients[k]), then interpolate the 4 row results
// in y. x_t/y_t are the fractional offsets within the source cell.
DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T coefficients[4]; const T* in_pos_0; const T* in_pos_1; const T* in_pos_2; const T* in_pos_3; int access_x_0; if (data_layout == DataLayout::kNCHW) { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0); access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0); in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_0]; in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_1]; in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_2]; in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_3]; coefficients[k] = Kecubic_interp<T>(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0); int 
access_x_0 = max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0); const T* in_pos_0 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_0 * num_channels + channel_id]; const T* in_pos_1 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_1 * num_channels + channel_id]; const T* in_pos_2 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_2 * num_channels + channel_id]; const T* in_pos_3 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_3 * num_channels + channel_id]; coefficients[k] = Kecubic_interp(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t)); } } }
// KeBicubicInterpBw: bicubic backward — distributes each output gradient to
// the 16 (clamped) input taps weighted by the separable x/y cubic
// coefficients; body continues on the following lines.
template <typename T> __global__ void KeBicubicInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * 
// Body of KeBicubicInterpBw (header on the previous line): computes separable
// cubic weights via get_cubic_upsample_coefficients, then does 16 atomic adds
// (clamped 4x4 window), NCHW vs NHWC addressing.
num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients(x_coeffs, x_t); get_cubic_upsample_coefficients(y_coeffs, y_t); const T* out_pos = &out[out_id_h * output_w + out_id_w]; T* in_pos; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int access_y = max(min(static_cast<int>(input_y - 1 + j), static_cast<int>(in_img_h - 1)), 0); int access_x = max(min(static_cast<int>(input_x - 1 + i), static_cast<int>(in_img_w - 1)), 0); if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x]; } else { in_pos = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x * num_channels + channel_id]; } platform::CudaAtomicAdd(&in_pos[0], (out_pos[0] * y_coeffs[j] * x_coeffs[i])); } } } }
// Interpolate1DCUDAFwd: host-side wrapper. Resolves out_w from (priority
// order) SizeTensor list > Scale tensor/attr > OutSize tensor, validates it,
// allocates the output, short-circuits with a copy when sizes match, computes
// ratio_w per align_corners, and launches the linear forward kernel.
template <typename T> static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float 
scale_w = -1; if (list_new_shape_tensor.size() > 0) { // have size tensor
auto new_size = get_new_shape(list_new_shape_tensor); out_w = new_size[0]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
// Tail of Interpolate1DCUDAFwd: ratio + 1-D launch config and the hipified
// linear-forward kernel launch (hipLaunchKernelGGL wraps the <<<>>> launch).
static_cast<float>(in_w - 1.0) / (out_w - 1.0) : static_cast<float>(new_scale_w); } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } }
// Interpolate2DCUDAFwd: host-side 2-D wrapper; same out-shape resolution
// scheme as the 1-D path (SizeTensor > Scale > OutSize). A single scale value
// is broadcast to both h and w; with two values the order is (h, w) from a
// Scale tensor but scale attr uses scale[0]=h, scale[1]=w as well.
template <typename T> static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1; float scale_h = -1; if (list_new_shape_tensor.size() > 0) { // have size tensor
auto new_size = get_new_shape(list_new_shape_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) 
{ scale_w = scale[1]; scale_h = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0. && scale_h > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
// Tail of Interpolate2DCUDAFwd: dispatch to nearest / bilinear / bicubic
// forward kernels. NOTE(review): the bicubic launch uses a fixed dim3(512)
// block size instead of config.thread_per_block — presumably intentional
// (matches upstream Paddle), but worth confirming against GetGpuLaunchConfig1D.
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpFw< T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpFw<T>), dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } }
// Interpolate3DCUDAFwd: host-side 3-D wrapper (depth/height/width). Shape
// resolution mirrors the 2-D path; scale order is (d, h, w), with a single
// scale value broadcast to all three axes.
template <typename T> static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = 
ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1; float scale_d = -1; float scale_h = -1; if (list_new_shape_tensor.size() > 0) { // have size tensor
auto new_size = get_new_shape(list_new_shape_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
// Tail of Interpolate3DCUDAFwd: trilinear forward launch.
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } }
// Interpolate1DCUDABwd: host-side 1-D backward wrapper. Resolves the forward
// out_w the same way the forward path does (Scale > OutSize > SizeTensor —
// note SizeTensor is applied LAST here, unlike the forward wrapper where it
// takes priority first), zero-fills the input gradient, short-circuits with a
// copy when sizes match, then launches the linear backward kernel.
template <typename T> static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ(scale_w > 0, true, platform::errors::InvalidArgument( "scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0.) 
{ out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor
auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
// Tail of Interpolate1DCUDABwd (ratio + linear backward launch), then the
// head of Interpolate2DCUDABwd; its body continues past this view.
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_w = scale[1]; scale_h = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_w > 0. && scale_h > 0.) 
{ out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpBw< T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpBw<T>), dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float 
scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0 && scale_h > 0 && scale_d > 0, true, platform::errors::InvalidArgument("scale of Op(interpolate) " "should be greater than 0.")); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) { out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if 
(in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateOpV2CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D 
interpolation Interpolate1DCUDAFwd<T>(ctx, *input, output);
    } else if (input_dims.size() == 4) {  // 2D interpolation
      Interpolate2DCUDAFwd<T>(ctx, *input, output);
    } else if (input_dims.size() == 5) {  // 3D interpolation
      Interpolate3DCUDAFwd<T>(ctx, *input, output);
    }
  }
};

// GPU kernel for the interpolate_v2 gradient ops. Dispatches on the rank of
// the incoming output gradient (3 -> 1-D, 4 -> 2-D, 5 -> 3-D) to the matching
// Interpolate*DCUDABwd helper defined earlier in this file.
template <typename T>
class InterpolateV2GradOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // GPU-only kernel; fail loudly if the op was placed on another device.
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        platform::errors::NotFound("This kernel only runs on GPU device."));
    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto output_grad_dims = output_grad->dims();
    if (output_grad_dims.size() == 3) {  // 1D interpolation
      Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad);
    } else if (output_grad_dims.size() == 4) {  // 2D interpolation
      Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad);
    } else if (output_grad_dims.size() == 5) {  // 3D interpolation
      Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

// Forward kernels are registered for float/double/int; gradient kernels for
// float/double only, for each interpolation mode.
REGISTER_OP_CUDA_KERNEL(bilinear_interp_v2,
                        ops::InterpolateOpV2CUDAKernel<float>,
                        ops::InterpolateOpV2CUDAKernel<double>,
                        ops::InterpolateOpV2CUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(bilinear_interp_v2_grad,
                        ops::InterpolateV2GradOpCUDAKernel<float>,
                        ops::InterpolateV2GradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(nearest_interp_v2,
                        ops::InterpolateOpV2CUDAKernel<float>,
                        ops::InterpolateOpV2CUDAKernel<double>,
                        ops::InterpolateOpV2CUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(nearest_interp_v2_grad,
                        ops::InterpolateV2GradOpCUDAKernel<float>,
                        ops::InterpolateV2GradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(trilinear_interp_v2,
                        ops::InterpolateOpV2CUDAKernel<float>,
                        ops::InterpolateOpV2CUDAKernel<double>,
                        ops::InterpolateOpV2CUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(trilinear_interp_v2_grad,
                        ops::InterpolateV2GradOpCUDAKernel<float>,
                        ops::InterpolateV2GradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(linear_interp_v2,
                        ops::InterpolateOpV2CUDAKernel<float>,
                        ops::InterpolateOpV2CUDAKernel<double>,
                        ops::InterpolateOpV2CUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(linear_interp_v2_grad,
                        ops::InterpolateV2GradOpCUDAKernel<float>,
                        ops::InterpolateV2GradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(bicubic_interp_v2,
                        ops::InterpolateOpV2CUDAKernel<float>,
                        ops::InterpolateOpV2CUDAKernel<double>,
                        ops::InterpolateOpV2CUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(bicubic_interp_v2_grad,
                        ops::InterpolateV2GradOpCUDAKernel<float>,
                        ops::InterpolateV2GradOpCUDAKernel<double>);
// ad7b71bae9dda13cf2178127e8fa01d155efed60.cu — stray file-name artifact left
// between the HIP copy above and the CUDA copy below; kept only as a comment
// so the bare identifier no longer breaks compilation.
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License. */

#include <algorithm>
#include <string>

#include "paddle/fluid/operators/interpolate_v2_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

namespace paddle {
namespace operators {

using framework::Tensor;
using DataLayout = framework::DataLayout;

// Forward kernel for 2-D nearest-neighbour interpolation. One thread handles
// one output element via a grid-stride loop; output_h is the batch dimension
// and output_w flattens channel * out_img_h * out_img_w. Supports NCHW and
// channel-last layouts via data_layout.
template <typename T>
__global__ void KeNearestNeighborInterpFw(
    const T* in, const size_t in_img_h, const size_t in_img_w,
    const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
    const size_t out_img_w, const size_t output_h, const size_t output_w,
    const size_t num_channels, const float ratio_h, const float ratio_w,
    const bool align_corners, const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // Grid-stride loop over all output elements.
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the flat output index into (channel, y, x) per layout.
    int channel_id, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // align_corners rounds to nearest; otherwise truncates (floor for >= 0).
    int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    // Scatter the output gradient into the nearest input pixel; atomics are
    // required because several output pixels can map to the same input pixel.
    T* in_pos;
    if (data_layout == DataLayout::kNCHW) {
      in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
                   in_img_idy * in_img_w + in_img_idx];
    } else {
      in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
                   in_img_idx * num_channels + channel_id];
    }
    const T out_pos = out[out_id_h * output_w + out_id_w];
    platform::CudaAtomicAdd(in_pos, out_pos);
  }
}

// Forward kernel for 1-D linear interpolation. Grid-stride loop over all
// output elements; output_h is the batch dimension and output_w flattens
// channel * out_img_w. Supports NCHW ("NCW") and channel-last layouts via
// data_layout. align_mode == 0 with align_corners == false uses the
// half-pixel source mapping.
template <typename T>
__global__ void KeLinearInterpFw(const T* in, const size_t in_img_w,
                                 const size_t input_w, T* out,
                                 const size_t out_img_w, const size_t output_h,
                                 const size_t output_w,
                                 const size_t num_channels, const float ratio_w,
                                 const bool align_corners, const int align_mode,
                                 const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Left source index (clamped to 0) and the 0/1 offset of the right
    // neighbour; w_id == 0 at the right border keeps in_pos[w_id] in range.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;  // w
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;  // w_id
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // BUG FIX: the batch-row offset must be out_id_h * input_w (the stride
      // of one batch item), not out_id_h * out_id_w, which read from the
      // wrong input element for every batch item after the first.
      const T* in_pos =
          &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id];
    } else {
      const T* in_pos =
          &in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels];
    }
  }
}

// Backward kernel for 1-D linear interpolation: atomically scatters each
// output gradient to the two neighbouring input pixels, weighted by the
// interpolation lambdas.
template <typename T>
__global__ void KeLinearInterpBw(T* in, const size_t in_img_w,
                                 const size_t input_w, const T* out,
                                 const size_t out_img_w, const size_t output_h,
                                 const size_t output_w,
                                 const size_t num_channels, const T ratio_w,
                                 const bool align_corners, const int align_mode,
                                 const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5
                                : ratio_w * out_img_idx;
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;  // w
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;  // w_id
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda = align_flag ?
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? 
in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = 
(align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeTrilinearInterpFw( const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = 
output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } } template <typename T> __global__ void KeTrilinearInterpBw( T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, 
const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners,
    const int align_mode, const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // align_mode == 0 with align_corners == false selects the half-pixel
  // mapping, mirroring the forward kernel.
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;

    // Coordinate decode + source indices/weights: identical to
    // KeTrilinearInterpFw above.
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }

    int in_img_idt = align_flag
                         ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
    int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
    T src_d = ratio_d * (out_img_idt + 0.5) - 0.5;
    src_d = (src_d > 0) ? src_d : 0;
    T d1lambda =
        align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt;
    T d2lambda = 1.f - d1lambda;

    int in_img_idy = align_flag
                         ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
    int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
    T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
    src_h = (src_h > 0) ? src_h : 0;
    T h1lambda =
        align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
    T h2lambda = 1.f - h1lambda;

    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
    T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    T w1lambda =
        align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
    T w2lambda = 1.f - w1lambda;

    if (data_layout == DataLayout::kNCHW) {
      int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
                        (in_img_idt * in_img_h + in_img_idy) * in_img_w +
                        in_img_idx;
      T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
      T* in_pos2 = &in[in_pos2_idx];
      const T* out_pos = &out[out_id_h * output_w + out_id_w];

      // trilinear interpolation grad: scatter out_pos[0] weighted by the same
      // 8 corner weights the forward pass used.
      platform::CudaAtomicAdd(&in_pos1[0],
                              d2lambda * h2lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos1[w_id],
                              d2lambda * h2lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w],
                              d2lambda * h1lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id],
                              d2lambda * h1lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[0],
                              d1lambda * h2lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[w_id],
                              d1lambda * h2lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w],
                              d1lambda * h1lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id],
                              d1lambda * h1lambda * w1lambda * out_pos[0]);
    } else {
      int in_pos1_idx = out_id_h * input_w +
                        in_img_idt * in_img_h * in_img_w * num_channels +
                        in_img_idy * in_img_w * num_channels +
                        in_img_idx * num_channels + channel_id;
      T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
      T* in_pos2 = &in[in_pos2_idx];
      const T* out_pos = &out[out_id_h * output_w + out_id_w];

      // trilinear interpolation grad (NHWC: taps stride by num_channels).
      platform::CudaAtomicAdd(&in_pos1[0],
                              d2lambda * h2lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos1[w_id * num_channels],
                              d2lambda * h2lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels],
                              d2lambda * h1lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(
          &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels],
          d2lambda * h1lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[0],
                              d1lambda * h2lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[w_id * num_channels],
                              d1lambda * h2lambda * w1lambda * out_pos[0]);
      platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels],
                              d1lambda * h1lambda * w2lambda * out_pos[0]);
      platform::CudaAtomicAdd(
          &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels],
          d1lambda * h1lambda * w1lambda * out_pos[0]);
    }
  }
}

// Cubic convolution interpolation of four consecutive taps x0..x3 at
// fractional position t in [0, 1), using the Keys kernel with a = -0.75.
// cubic_convolution1/2 are defined elsewhere in this file.
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(const T x0, const T x1,
                                                   const T x2, const T x3,
                                                   T t) {
  T coeffs[4];
  T a = -0.75;
  T x_1 = t;
  T x_2 = 1.0 - t;
  coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a);
  coeffs[1] = cubic_convolution1<T>(x_1, a);
  coeffs[2] = cubic_convolution1<T>(x_2, a);
  coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a);
  return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3];
}

// Forward bicubic interpolation: each thread produces one output element from
// a 4x4 input neighbourhood (edge-clamped), interpolating along x then y.
// Grid-stride loop over output_h * output_w elements.
template <typename T>
__global__ void KeBicubicInterpFw(
    const T* in, const size_t in_img_h, const size_t in_img_w,
    const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
    const size_t out_img_w, const size_t output_h, const size_t output_w,
    const size_t num_channels, const float ratio_h, const float ratio_w,
    const bool align_corners, const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idy, out_img_idx;
    if (data_layout ==
DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }

    // Fractional source coordinates; align_corners uses the direct mapping,
    // otherwise the half-pixel mapping (may go negative near the border --
    // the access indices are clamped below).
    T in_img_idy = align_corners
                       ? static_cast<T>(ratio_h * out_img_idy)
                       : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    int input_y = floorf(in_img_idy);
    const T y_t = in_img_idy - input_y;

    T in_img_idx = align_corners
                       ? static_cast<T>(ratio_w * out_img_idx)
                       : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    int input_x = floorf(in_img_idx);
    const T x_t = in_img_idx - input_x;

    T coefficients[4];
    const T* in_pos_0;
    const T* in_pos_1;
    const T* in_pos_2;
    const T* in_pos_3;
    int access_x_0;
    if (data_layout == DataLayout::kNCHW) {
      // Interpolate along x for each of the 4 rows, then along y.
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
        access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
        in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_0];
        in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_1];
        in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_2];
        in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_3];
        coefficients[k] = Kecubic_interp<T>(in_pos_0[0], in_pos_1[0],
                                            in_pos_2[0], in_pos_3[0], x_t);
      }
      out[out_id_h * output_w + out_id_w] =
          Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2],
                            coefficients[3], y_t);
    } else {
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
        int access_x_0 =
            max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
        const T* in_pos_0 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_0 * num_channels + channel_id];
        const T* in_pos_1 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_1 * num_channels + channel_id];
        const T* in_pos_2 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_2 * num_channels + channel_id];
        const T* in_pos_3 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_3 * num_channels + channel_id];
        coefficients[k] = Kecubic_interp(in_pos_0[0], in_pos_1[0], in_pos_2[0],
                                         in_pos_3[0], x_t);
      }
      out[out_id_h * output_w + out_id_w] =
          static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1],
                                        coefficients[2], coefficients[3], y_t));
    }
  }
}

// Backward bicubic interpolation: each thread scatters one output-gradient
// element into its 4x4 (edge-clamped) input neighbourhood via CudaAtomicAdd,
// using coefficients from get_cubic_upsample_coefficients.
template <typename T>
__global__ void KeBicubicInterpBw(
    T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
    const size_t input_w, const T* out, const size_t out_img_h,
    const size_t out_img_w, const size_t output_h, const size_t output_w,
    const size_t num_channels, const float ratio_h, const float ratio_w,
    const bool align_corners, const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w *
num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }

    // Fractional source coordinates, same mapping as KeBicubicInterpFw.
    T in_img_idy = align_corners
                       ? static_cast<T>(ratio_h * out_img_idy)
                       : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    int input_y = floorf(in_img_idy);
    const T y_t = in_img_idy - input_y;

    T in_img_idx = align_corners
                       ? static_cast<T>(ratio_w * out_img_idx)
                       : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    int input_x = floorf(in_img_idx);
    const T x_t = in_img_idx - input_x;

    T x_coeffs[4];
    T y_coeffs[4];
    get_cubic_upsample_coefficients(x_coeffs, x_t);
    get_cubic_upsample_coefficients(y_coeffs, y_t);

    const T* out_pos = &out[out_id_h * output_w + out_id_w];
    T* in_pos;
    // Scatter into the clamped 4x4 neighbourhood; atomics are required since
    // adjacent output pixels share input taps.
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j < 4; j++) {
        int access_y = max(min(static_cast<int>(input_y - 1 + j),
                               static_cast<int>(in_img_h - 1)),
                           0);
        int access_x = max(min(static_cast<int>(input_x - 1 + i),
                               static_cast<int>(in_img_w - 1)),
                           0);
        if (data_layout == DataLayout::kNCHW) {
          in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x];
        } else {
          in_pos = &in[out_id_h * input_w +
                       access_y * in_img_w * num_channels +
                       access_x * num_channels + channel_id];
        }
        platform::CudaAtomicAdd(&in_pos[0],
                                (out_pos[0] * y_coeffs[j] * x_coeffs[i]));
      }
    }
  }
}

// Host-side launcher for 1-D ("linear") forward interpolation.  Resolves the
// output width from, in priority order: SizeTensor input, then Scale input /
// "scale" attr combined with OutSize; falls back to the "out_w" attr.  When
// no resize is needed the input is copied through unchanged.
template <typename T>
static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx,
                                 const Tensor& input, Tensor* output) {
  auto* input_data = input.data<T>();
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_w = ctx.Attr<int>("out_w");
  auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1;
  if (list_new_shape_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_shape_tensor);
    out_w = new_size[0];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      scale_w = scale_data[0];
      PADDLE_ENFORCE_EQ(scale_w > 0, true,
                        platform::errors::InvalidArgument(
                            "scale of Op(interpolate) "
                            "should be greater than 0."));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(scale_w > 0, true,
                          platform::errors::InvalidArgument(
                              "scale of Op(interpolate) "
                              "should be greater than 0."));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) overrides any scale-derived width.
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      Tensor sizes;
      framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_w = size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace());

  // Same size: plain copy, no kernel launch.
  if (in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ?
static_cast<float>(in_w - 1.0) / (out_w - 1.0)
                              : static_cast<float>(new_scale_w);
  }

  int in_cw = c * in_w;
  int out_cw = c * out_w;
  int pixelNum = n * out_cw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  if ("linear" == interp_method) {
    KeLinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0,
                          ctx.cuda_device_context().stream()>>>(
        input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w,
        align_corners, align_mode, data_layout);
  }
}

// Host-side launcher for 2-D forward interpolation (nearest / bilinear /
// bicubic).  Output H/W resolution priority mirrors Interpolate1DCUDAFwd:
// SizeTensor > Scale input / "scale" attr > OutSize > out_h/out_w attrs.
template <typename T>
static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx,
                                 const Tensor& input, Tensor* output) {
  auto* input_data = input.data<T>();
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1;
  float scale_h = -1;
  if (list_new_shape_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_shape_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      // A single-element Scale tensor applies to both dimensions.
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0 && scale_h > 0, true,
          platform::errors::InvalidArgument("scale of Op(interpolate) "
                                            "should be greater than 0."));
    } else {
      if (scale.size() > 1) {
        scale_w = scale[1];
        scale_h = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0 && scale_h > 0, true,
            platform::errors::InvalidArgument("scale of Op(interpolate) "
                                              "should be greater than 0."));
      }
    }
    if (scale_w > 0. && scale_h > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      Tensor sizes;
      framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_h = size_data[0];
      out_w = size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace());

  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ?
static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }

  int in_hw = in_h * in_w;
  int out_hw = out_h * out_w;
  int in_chw = c * in_hw;
  int out_chw = c * out_hw;
  int pixelNum = n * out_chw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  // Dispatch on the requested method.  NOTE: bicubic launches with a fixed
  // 512 threads per block rather than config.thread_per_block.
  if ("nearest" == interp_method) {
    KeNearestNeighborInterpFw<
        T><<<config.block_per_grid, config.thread_per_block, 0,
             ctx.cuda_device_context().stream()>>>(
        input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
        out_chw, c, ratio_h, ratio_w, align_corners, data_layout);
  } else if ("bilinear" == interp_method) {
    KeBilinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0,
                            ctx.cuda_device_context().stream()>>>(
        input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
        out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout);
  } else if ("bicubic" == interp_method) {
    KeBicubicInterpFw<T><<<config.block_per_grid, 512, 0,
                           ctx.cuda_device_context().stream()>>>(
        input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
        out_chw, c, ratio_h, ratio_w, align_corners, data_layout);
  }
}

// Host-side launcher for 3-D ("trilinear") forward interpolation.  Same
// output-size resolution priority as the 1-D/2-D variants, extended to D/H/W.
template <typename T>
static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx,
                                 const Tensor& input, Tensor* output) {
  auto* input_data = input.data<T>();
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  float scale_w = -1;
  float scale_d = -1;
  float scale_h = -1;
  if (list_new_shape_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_shape_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    auto scale_tensor = ctx.Input<Tensor>("Scale");
    auto scale = ctx.Attr<std::vector<float>>("scale");
    if (scale_tensor != nullptr) {
      // A single-element Scale tensor applies to all three dimensions.
      auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
      if (scale_data.size() > 1) {
        scale_d = scale_data[0];
        scale_h = scale_data[1];
        scale_w = scale_data[2];
      } else {
        scale_d = scale_data[0];
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0 && scale_h > 0 && scale_d > 0, true,
          platform::errors::InvalidArgument("scale of Op(interpolate) "
                                            "should be greater than 0."));
    } else {
      if (scale.size() > 1) {
        scale_d = scale[0];
        scale_h = scale[1];
        scale_w = scale[2];
        PADDLE_ENFORCE_EQ(
            scale_w > 0 && scale_h > 0 && scale_d > 0, true,
            platform::errors::InvalidArgument("scale of Op(interpolate) "
                                              "should be greater than 0."));
      }
    }
    if (scale_d > 0. && scale_h > 0. && scale_w > 0.)
{
      out_d = static_cast<int>(in_d * scale_d);
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    auto out_size = ctx.Input<Tensor>("OutSize");
    if (out_size != nullptr) {
      Tensor sizes;
      framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
      auto size_data = sizes.data<int>();
      out_d = size_data[0];
      out_h = size_data[1];
      out_w = size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument(
                                  "out_d in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument(
                                  "out_h in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument(
                                  "out_w in Attr(out_shape) of Op(interpolate) "
                                  "should be greater than 0."));
  framework::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace());

  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(input, ctx.GetPlace(), output);
    return;
  }

  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }

  int in_dhw = in_d * in_h * in_w;
  int out_dhw = out_d * out_h * out_w;
  int in_cdhw = c * in_dhw;
  int out_cdhw = c * out_dhw;
  int pixelNum = n * out_cdhw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  if ("trilinear" == interp_method) {
    KeTrilinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0,
                             ctx.cuda_device_context().stream()>>>(
        input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h,
        out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners,
        align_mode, data_layout);
  }
}

// Host-side launcher for 1-D backward ("linear") interpolation: resolves the
// forward output width the same way as Interpolate1DCUDAFwd, zero-fills the
// input gradient, then scatters the output gradient back.
// NOTE(review): output_grad is taken by value here (and in the 2-D variant)
// but by const reference in Interpolate3DCUDABwd -- looks like an accidental
// copy; confirm before changing the signature.
template <typename T>
static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx,
                                 Tensor* input_grad, const Tensor output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_w = ctx.Attr<int>("out_w");
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    scale_w = scale_data[0];
    PADDLE_ENFORCE_EQ(scale_w > 0, true,
                      platform::errors::InvalidArgument(
                          "scale of Op(interpolate) "
                          "should be greater than 0."));
  } else {
    if (scale.size() > 0) {
      scale_w = scale[0];
      PADDLE_ENFORCE_EQ(scale_w > 0, true,
                        platform::errors::InvalidArgument(
                            "scale of Op(interpolate) "
                            "should be greater than 0."));
    }
  }
  if (scale_w > 0.)
{
    out_w = static_cast<int>(in_w * scale_w);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    Tensor sizes;
    framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
    auto size_data = sizes.data<int>();
    out_w = size_data[0];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_w = new_size[0];
  }

  auto* output_grad_data = output_grad.data<T>();
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_w};
  } else {
    dim_grad = {n, in_w, c};
  }
  // NOTE(review): mutable_data is called twice with identical arguments; the
  // first call looks redundant.
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>();
  math::SetConstant<platform::CUDADeviceContext, T> zero;
  // Zero-fill first: the backward kernel accumulates with atomics.
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int in_cw = c * in_w;
  int out_cw = c * out_w;
  int pixelNum = n * out_cw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  if ("linear" == interp_method) {
    KeLinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0,
                          ctx.cuda_device_context().stream()>>>(
        input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c,
        ratio_w, align_corners, align_mode, data_layout);
  }
}

// Host-side launcher for 2-D backward interpolation (nearest / bilinear /
// bicubic).  Mirrors Interpolate2DCUDAFwd's output-size resolution.
// NOTE(review): output_grad is taken by value (copied); the 3-D variant takes
// a const reference.
template <typename T>
static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx,
                                 Tensor* input_grad, const Tensor output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_h = scale_data[0];
      scale_w = scale_data[1];
    } else {
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0 && scale_h > 0, true,
        platform::errors::InvalidArgument("scale of Op(interpolate) "
                                          "should be greater than 0."));
  } else {
    if (scale.size() > 1) {
      scale_w = scale[1];
      scale_h = scale[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0 && scale_h > 0, true,
          platform::errors::InvalidArgument("scale of Op(interpolate) "
                                            "should be greater than 0."));
    }
  }
  if (scale_w > 0. && scale_h > 0.)
{
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    Tensor sizes;
    framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
    auto size_data = sizes.data<int>();
    out_h = size_data[0];
    out_w = size_data[1];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_h = new_size[0];
    out_w = new_size[1];
  }

  auto* output_grad_data = output_grad.data<T>();
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_h, in_w};
  } else {
    dim_grad = {n, in_h, in_w, c};
  }
  // NOTE(review): mutable_data is called twice with identical arguments; the
  // first call looks redundant.
  input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>();
  math::SetConstant<platform::CUDADeviceContext, T> zero;
  // Zero-fill first: the backward kernels accumulate with atomics.
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int in_hw = in_h * in_w;
  int out_hw = out_h * out_w;
  int in_chw = c * in_hw;
  int out_chw = c * out_hw;
  int pixelNum = n * out_chw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  // NOTE: bicubic backward launches with a fixed 512 threads per block.
  if ("nearest" == interp_method) {
    KeNearestNeighborInterpBw<
        T><<<config.block_per_grid, config.thread_per_block, 0,
             ctx.cuda_device_context().stream()>>>(
        input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w,
        n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout);
  } else if ("bilinear" == interp_method) {
    KeBilinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0,
                            ctx.cuda_device_context().stream()>>>(
        input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w,
        n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode,
        data_layout);
  } else if ("bicubic" == interp_method) {
    KeBicubicInterpBw<T><<<config.block_per_grid, 512, 0,
                           ctx.cuda_device_context().stream()>>>(
        input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w,
        n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout);
  }
}

// Host-side launcher for 3-D ("trilinear") backward interpolation.
template <typename T>
static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx,
                                 Tensor* input_grad,
                                 const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout = framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto
scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    // A single-element Scale tensor applies to all three dimensions.
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_d = scale_data[0];
      scale_h = scale_data[1];
      scale_w = scale_data[2];
    } else {
      scale_d = scale_data[0];
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0 && scale_h > 0 && scale_d > 0, true,
        platform::errors::InvalidArgument("scale of Op(interpolate) "
                                          "should be greater than 0."));
  } else {
    if (scale.size() > 1) {
      scale_d = scale[0];
      scale_h = scale[1];
      scale_w = scale[2];
      PADDLE_ENFORCE_EQ(
          scale_w > 0 && scale_h > 0 && scale_d > 0, true,
          platform::errors::InvalidArgument("scale of Op(interpolate) "
                                            "should be greater than 0."));
    }
  }
  if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
    out_d = static_cast<int>(in_d * scale_d);
    out_h = static_cast<int>(in_h * scale_h);
    out_w = static_cast<int>(in_w * scale_w);
  }
  auto out_size = ctx.Input<Tensor>("OutSize");
  if (out_size != nullptr) {
    Tensor sizes;
    framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes);
    auto size_data = sizes.data<int>();
    out_d = size_data[0];
    out_h = size_data[1];
    out_w = size_data[2];
  }
  auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor");
  if (list_new_size_tensor.size() > 0) {
    // have size tensor
    auto new_size = get_new_shape(list_new_size_tensor);
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  }

  auto* output_grad_data = output_grad.data<T>();
  framework::DDim dim_grad;
  if (data_layout == DataLayout::kNCHW) {
    dim_grad = {n, c, in_d, in_h, in_w};
  } else {
    dim_grad = {n, in_d, in_h, in_w, c};
  }
  auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace());
  auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>();
  math::SetConstant<platform::CUDADeviceContext, T> zero;
  // Zero-fill first: KeTrilinearInterpBw accumulates with atomics.
  zero(device_ctx, input_grad, static_cast<T>(0.0));

  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad);
    return;
  }

  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = 0.f;
    new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }

  int in_dhw = in_d * in_h * in_w;
  int out_dhw = out_d * out_h * out_w;
  int in_cdhw = c * in_dhw;
  int out_cdhw = c * out_dhw;
  int pixelNum = n * out_cdhw;

  platform::GpuLaunchConfig config =
      platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum);

  if ("trilinear" == interp_method) {
    KeTrilinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0,
                             ctx.cuda_device_context().stream()>>>(
        input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d,
        out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners,
        align_mode, data_layout);
  }
}

// Forward GPU kernel for all interpolate_v2 ops: dispatches on the input
// rank (3 -> 1-D, 4 -> 2-D, 5 -> 3-D launcher).
template <typename T>
class InterpolateOpV2CUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        platform::errors::NotFound("This kernel only runs on GPU device."));
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");

    auto input_dims = input->dims();
    if (input_dims.size() == 3) {  // 1D interpolation
      Interpolate1DCUDAFwd<T>(ctx, *input, output);
    } else if (input_dims.size() == 4) {  // 2D interpolation
Interpolate2DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCUDAFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(bilinear_interp_v2, ops::InterpolateOpV2CUDAKernel<float>, ops::InterpolateOpV2CUDAKernel<double>, ops::InterpolateOpV2CUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bilinear_interp_v2_grad, ops::InterpolateV2GradOpCUDAKernel<float>, ops::InterpolateV2GradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(nearest_interp_v2, ops::InterpolateOpV2CUDAKernel<float>, ops::InterpolateOpV2CUDAKernel<double>, ops::InterpolateOpV2CUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(nearest_interp_v2_grad, ops::InterpolateV2GradOpCUDAKernel<float>, ops::InterpolateV2GradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_v2, ops::InterpolateOpV2CUDAKernel<float>, ops::InterpolateOpV2CUDAKernel<double>, ops::InterpolateOpV2CUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_v2_grad, ops::InterpolateV2GradOpCUDAKernel<float>, 
ops::InterpolateV2GradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(linear_interp_v2, ops::InterpolateOpV2CUDAKernel<float>, ops::InterpolateOpV2CUDAKernel<double>, ops::InterpolateOpV2CUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(linear_interp_v2_grad, ops::InterpolateV2GradOpCUDAKernel<float>, ops::InterpolateV2GradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_v2, ops::InterpolateOpV2CUDAKernel<float>, ops::InterpolateOpV2CUDAKernel<double>, ops::InterpolateOpV2CUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_v2_grad, ops::InterpolateV2GradOpCUDAKernel<float>, ops::InterpolateV2GradOpCUDAKernel<double>);
a80617f436de56d02cc1a55ab59a5d5e9d4adb3a.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include <vector> #include <hip/hip_runtime.h> #include "NPP_staging.hpp" texture<Ncv8u, 1, hipReadModeElementType> tex8u; texture<Ncv32u, 1, hipReadModeElementType> tex32u; texture<uint2, 1, hipReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static hipStream_t nppStream = 0; hipStream_t nppStGetActiveCUDAstream(void) { return nppStream; } hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream) { hipStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return 
s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline 
__device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, 
shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { hipChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = hipCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } hipLaunchKernelGGL(( scanRows <T_in, T_out, tbDoSqr>) , dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(), d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == 
NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), 
PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, 
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } 
//============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 
grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { hipLaunchKernelGGL(( decimate_C1R <T, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { hipChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } hipLaunchKernelGGL(( decimate_C1R <T, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / 
scale;

    // Host-side decimation: keep every scale-th pixel of every scale-th row.
    for (Ncv32u i = 0; i < dstRoi.height; i++)
    {
        for (Ncv32u j = 0; j < dstRoi.width; j++)
        {
            h_dst[i * dstStep + j] = h_src[i * scale * srcStep + j * scale];
        }
    }

    return NPPST_SUCCESS;
}


// Stamp out the public decimation entry points for every supported 32/64-bit
// element type; all of them funnel into the unsigned-integer implementation
// via a bit-preserving cast.
#define implementNppDecimate(bit, typ) \
    NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
                                              Ncv##bit##typ *d_dst, Ncv32u dstStep, \
                                              NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
    { \
        return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
                                                  (Ncv##bit##u *)d_dst, dstStep, \
                                                  srcRoi, scale, readThruTexture); \
    }

#define implementNppDecimateHost(bit, typ) \
    NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
                                                   Ncv##bit##typ *h_dst, Ncv32u dstStep, \
                                                   NcvSize32u srcRoi, Ncv32u scale) \
    { \
        return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
                                                (Ncv##bit##u *)h_dst, dstStep, \
                                                srcRoi, scale); \
    }

implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)


//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================


const Ncv32u NUM_RECTSTDDEV_THREADS = 128;


// Fetch one 32-bit integral-image (sum) element, either through the cached
// 1D texture path or straight from global memory.
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
    if (tbCacheTexture)
    {
        return tex1Dfetch(tex32u, x);
    }
    else
    {
        return d_sum[x];
    }
}


// Fetch one 64-bit squared-sum element; the texture path reads a uint2 and
// reassembles the 64-bit value from its two 32-bit halves.
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
    if (tbCacheTexture)
    {
        uint2 tmp = tex1Dfetch(tex64u, x);
        Ncv64u res = (Ncv64u)tmp.y;
        res <<= 32;
        res |= tmp.x;
        return res;
    }
    else
    {
        return d_sqsum[x];
    }
}


// Per-row kernel: computes the local standard deviation over `rect` around
// each ROI pixel from the integral (sum) and squared-integral images.
// One thread per output column; blockIdx.y selects the row.
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect,
                                   Ncv32f invRectArea)
{
    Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
    if (x_offs >= roi.width)
    {
        return;
    }

    Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
    Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;

    //OPT: try swapping order (could change cache hit/miss ratio)
    Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
    Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
    Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;

    Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
    sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
    sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
    sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
    sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
    Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;

    Ncv32f mean = sum_val * invRectArea;

    //////////////////////////////////////////////////////////////////////////
    // sqsum_val_res = sqsum_val / rectArea
    // (split the 64-bit value into a float-representable head and a small
    // remainder so the division keeps more precision than a single cast)
    //////////////////////////////////////////////////////////////////////////

    Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
    Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
    Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
    Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
    sqsum_val_1 *= invRectArea;
    sqsum_val_4 *= invRectArea;
    Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;

    //////////////////////////////////////////////////////////////////////////
    // variance = sqsum_val_res - mean * mean
    //////////////////////////////////////////////////////////////////////////

#if defined DISABLE_MAD_SELECTIVELY
    // NOTE(review): this branch reads sqsum_val_2 (the 64-bit truncation
    // intermediate), not sqsum_val_res — looks suspicious; confirm before
    // ever enabling DISABLE_MAD_SELECTIVELY.
    Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean);
#else
    Ncv32f variance = sqsum_val_res - mean * mean;
#endif

    //////////////////////////////////////////////////////////////////////////
    // stddev = sqrtf(variance)
    //////////////////////////////////////////////////////////////////////////

    //Ncv32f stddev = sqrtf(variance);
    Ncv32f stddev = __fsqrt_rn(variance);

    d_norm[blockIdx.y * normStep + x_offs] = stddev;
}


// Host launcher: validates steps/ROI, converts byte steps to element steps,
// and dispatches either the direct-global-memory or the texture-cached
// kernel variant.
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect,
                                   Ncv32f scaleArea, NcvBool readThruTexture)
{
    ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);

    // byte strides -> element strides
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);

    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;

    dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
    dim3 block(NUM_RECTSTDDEV_THREADS);

    if (!readThruTexture)
    {
        hipLaunchKernelGGL(( rectStdDev_32f_C1R <false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
            d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }
    else
    {
        hipChannelFormatDesc cfdTexSrc;
        hipChannelFormatDesc
cfdTexSqr;

        cfdTexSrc = hipCreateChannelDesc<Ncv32u>();
        cfdTexSqr = hipCreateChannelDesc<uint2>();

        // Bind both integral images to the 1D texture references used by the
        // cached fetch path; the launch then passes NULL raw pointers.
        size_t alignmentOffset;
        ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc,
                                           (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)),
                            NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr,
                                           (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)),
                            NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);

        hipLaunchKernelGGL(( rectStdDev_32f_C1R <true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
            NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// CPU reference implementation of the rectangular standard deviation,
// operating on the same integral / squared-integral inputs in Ncv64f.
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
                                        Ncv64u *h_sqsum, Ncv32u sqsumStep,
                                        Ncv32f *h_norm, Ncv32u normStep,
                                        NcvSize32u roi, NcvRect32u rect,
                                        Ncv32f scaleArea)
{
    ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);

    // byte strides -> element strides
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);

    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;

    for (Ncv32u i=0; i<roi.height; i++)
    {
        for (Ncv32u j=0; j<roi.width; j++)
        {
            Ncv32u sum_offset = i * sumStep + j;
            Ncv32u sqsum_offset = i * sqsumStep + j;

            // Four-corner lookups of the integral images over `rect`.
            Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x];
            Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x];
            Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width];
            Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width];
            Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl;

            Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x];
            Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x];
            Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width];
            Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width];
            Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl);

            Ncv64f mean = sum_val * invRectArea;
            Ncv64f sqsum_val_2 = sqsum_val / rectArea;
            Ncv64f variance = sqsum_val_2 - mean * mean;

            h_norm[i * normStep + j] = (Ncv32f)sqrt(variance);
        }
    }

    return NPPST_SUCCESS;
}


//==============================================================================
//
// Transpose.cu
//
//==============================================================================


const Ncv32u TRANSPOSE_TILE_DIM   = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;


/**
* \brief Matrix transpose kernel
*
* Calculates transpose of the input image
* \see TRANSPOSE_TILE_DIM
*
* \tparam T_in Type of input image elements
* \tparam T_out Type of output image elements
*
* \param d_src      [IN] Source image pointer
* \param srcStride  [IN] Source image stride
* \param d_dst      [OUT] Output image pointer
* \param dstStride  [IN] Output image stride
*
* \return None
*/
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
                          T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    // +1 column of padding avoids shared-memory bank conflicts on the
    // transposed (column-wise) reads.
    __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];

    Ncv32u blockIdx_x, blockIdx_y;

    // do diagonal reordering
    if (gridDim.x == gridDim.y)
    {
        blockIdx_y = blockIdx.x;
        blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    }
    else
    {
        Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
        blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
    }

    // Stage a TILE x TILE sub-block of the source into shared memory.
    Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
    Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
    Ncv32u index_gmem = xIndex + yIndex * srcStride;

    if (xIndex < srcRoi.width)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.height)
            {
                tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
            }
        }
    }

    __syncthreads();

    // Write the tile back transposed (block coordinates swapped).
    xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
    yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
    index_gmem = xIndex + yIndex * dstStride;

    if (xIndex < srcRoi.height)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.width)
            {
                d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
            }
        }
    }
}


// Device transpose launcher: validates strides, converts byte strides to
// element strides, and launches one TILE x TILE block per source tile.
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
                                 T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);

    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM,
              (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM);
    dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
    hipLaunchKernelGGL(( transpose <T>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        d_src, srcStride, d_dst, dstStride, srcRoi);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// CPU reference transpose with the same stride contract as the device path.
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
                                      T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);

    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    for (Ncv32u i=0; i<srcRoi.height; i++)
    {
        for (Ncv32u j=0; j<srcRoi.width; j++)
        {
            h_dst[j*dstStride+i] = h_src[i*srcStride + j];
        }
    }

    return NPPST_SUCCESS;
}


// Public transpose entry points for all 32/64-bit element types; like the
// decimate macros above, every type aliases the unsigned implementation.
#define implementNppTranspose(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
                                               Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
    { \
        return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
                                                   (Ncv##bit##u *)d_dst, dstStep, srcRoi); \
    }

#define implementNppTransposeHost(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
                                                    Ncv##bit##typ *h_dst, Ncv32u dstStep, \
                                                    NcvSize32u srcRoi) \
    { \
        return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
                                                 (Ncv##bit##u *)h_dst, dstStep, srcRoi); \
    }

implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)
implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)


// 128-bit (uint4) transpose variants.
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
                                  void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}

NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
                                       void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}


//==============================================================================
//
// Compact.cu
//
//============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u 
*d_dst, Ncv32u *dstLenPinned,
                                   Ncv32u elemRemove,
                                   INCVMemAllocator &gpuAllocator)
{
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        if (dstLenPinned != NULL)
        {
            *dstLenPinned = 0;
        }
        return NPPST_SUCCESS;
    }

    // Build the hierarchy of partial-sum levels: each level holds one value
    // per block of the level below, until a single block remains.
    std::vector<Ncv32u> partSumNums;
    std::vector<Ncv32u> partSumOffsets;
    Ncv32u partSumLastNum = srcLen;
    Ncv32u partSumLastOffs = 0;
    do
    {
        partSumNums.push_back(partSumLastNum);
        partSumOffsets.push_back(partSumLastOffs);
        Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
                                                 gpuAllocator.alignment()) / sizeof(Ncv32u);
        partSumLastOffs += curPartSumAlignedLength;
        partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
    }
    while (partSumLastNum>1);
    partSumNums.push_back(partSumLastNum);
    partSumOffsets.push_back(partSumLastOffs);

    NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);

    // In counting mode only the allocations above matter; skip the kernels.
    NCV_SET_SKIP_COND(gpuAllocator.isCounting());
    NCV_SKIP_COND_BEGIN

    dim3 block(NUM_REMOVE_THREADS);

    //calculate zero-level partial sums for indices calculation
    if (partSumNums.size() > 2)
    {
        dim3 grid(partSumNums[1]);

        if (grid.x > 65535)
        {
            grid.y = (grid.x + 65534) / 65535;
            grid.x = 65535;
        }
        hipLaunchKernelGGL(( removePass1Scan <true, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
            d_src, srcLen,
            d_hierSums.ptr(),
            d_hierSums.ptr() + partSumOffsets[1],
            elemRemove);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

        //calculate hierarchical partial sums
        for (Ncv32u i=1; i<partSumNums.size()-1; i++)
        {
            dim3 grid_partial(partSumNums[i+1]);
            if (grid_partial.x > 65535)
            {
                grid_partial.y = (grid_partial.x + 65534) / 65535;
                grid_partial.x = 65535;
            }
            if (grid_partial.x != 1)
            {
                hipLaunchKernelGGL(( removePass1Scan <false, true>) , dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
                    d_hierSums.ptr() + partSumOffsets[i],
                    partSumNums[i], NULL,
                    d_hierSums.ptr() + partSumOffsets[i+1],
                    NULL);
            }
            else
            {
                hipLaunchKernelGGL(( removePass1Scan <false, false>) , dim3(grid_partial), dim3(block), 0, nppStGetActiveCUDAstream(),
                    d_hierSums.ptr() + partSumOffsets[i],
                    partSumNums[i], NULL, NULL, NULL);
            }
            ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        }

        //adjust hierarchical partial sums
        for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--)
        {
            dim3 grid_local(partSumNums[i+1]);
            if (grid_local.x > 65535)
            {
                grid_local.y = (grid_local.x + 65534) / 65535;
                grid_local.x = 65535;
            }
            hipLaunchKernelGGL(( removePass2Adjust) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
                d_hierSums.ptr() + partSumOffsets[i], partSumNums[i],
                d_hierSums.ptr() + partSumOffsets[i+1]);
            ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        }
    }
    else
    {
        // Fits in a single scan block: no hierarchy needed.
        dim3 grid_local(partSumNums[1]);
        hipLaunchKernelGGL(( removePass1Scan <true, false>) , dim3(grid_local), dim3(block), 0, nppStGetActiveCUDAstream(),
            d_src, srcLen,
            d_hierSums.ptr(),
            NULL, elemRemove);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
    }

    //compact source vector using indices
    dim3 grid(partSumNums[1]);
    if (grid.x > 65535)
    {
        grid.y = (grid.x + 65534) / 65535;
        grid.x = 65535;
    }
    hipLaunchKernelGGL(( removePass3Compact) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
        d_src, srcLen, d_hierSums.ptr(),
        d_dst, elemRemove, d_numDstElements.ptr());
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    //get number of dst elements
    if (dstLenPinned != NULL)
    {
        ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u),
                                           hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
        ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR);
    }

    NCV_SKIP_COND_END

    return NPPST_SUCCESS;
}


// Dry-run the compaction through a counting allocator to report the scratch
// buffer size required for a given source length.
NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        *pBufsize = 0;
        return NPPST_SUCCESS;
    }

    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE, gpuCounter);
    ncvAssertReturnNcvStat(ncvStat);

    *pBufsize = (Ncv32u)gpuCounter.maxSize();
    return NPPST_SUCCESS;
}


NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


// Run the compaction against caller-provided scratch memory.
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                            Ncv32u *d_dst, Ncv32u *p_dstLen,
                            Ncv32u elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize,
                                      static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen,
                                                 elemRemove, gpuAllocator);
    ncvAssertReturnNcvStat(ncvStat);

    return NPPST_SUCCESS;
}


// Typed wrappers: reinterpret the bits as 32-bit unsigned and delegate.
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
                            Ncv32s *d_dst, Ncv32u *p_dstLen,
                            Ncv32s elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}


NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
                            Ncv32f *d_dst, Ncv32u *p_dstLen,
                            Ncv32f elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize,
hipDeviceProp_t &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}


// CPU reference compaction: stable copy of all elements != elemRemove.
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
                                 Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        if (dstLen != NULL)
        {
            *dstLen = 0;
        }
        return NPPST_SUCCESS;
    }

    Ncv32u dstIndex = 0;
    for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++)
    {
        if (h_src[srcIndex] != elemRemove)
        {
            h_dst[dstIndex++] = h_src[srcIndex];
        }
    }

    if (dstLen != NULL)
    {
        *dstLen = dstIndex;
    }

    return NPPST_SUCCESS;
}


NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
                                 Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen,
                                  *(Ncv32u *)&elemRemove);
}


NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
                                 Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen,
                                  *(Ncv32u *)&elemRemove);
}


//==============================================================================
//
// Filter.cu
//
//==============================================================================


texture <float, 1, hipReadModeElementType> texSrc;
texture <float, 1, hipReadModeElementType> texKernel;


// Mirrored-border fetch along a row: indices outside [0, w) are reflected.
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
                                                   int i,
                                                   int w)
{
    if (i < 0) i = 1 - i;
    if (i >= w) i = w + w - i - 1;
    return tex1Dfetch (texSrc, rowOffset + i);
}


// Mirrored-border fetch along a column: indices outside [0, h) are reflected.
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
                                                      const int rowStep,
                                                      int j,
                                                      int h)
{
    if (j < 0) j = 1 - j;
    if (j >= h) j = h + h - j - 1;
    return tex1Dfetch (texSrc, offset + j * rowStep);
}


// Horizontal 1D convolution with mirror border handling; source and kernel
// are read through the texSrc/texKernel texture references.
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
                                              Ncv32f *pDst,
                                              NcvSize32u dstSize,
                                              Ncv32u dstStep,
                                              NcvRect32u roi,
                                              Ncv32s nKernelSize,
                                              Ncv32s nAnchor,
                                              Ncv32f multiplier)
{
    // position within ROI
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }

    const int p = nKernelSize - nAnchor - 1;
    const int j = roi.y + iy;
    const int rowOffset = j * srcStep + roi.x;

    float sum = 0.0f;
    for (int m = 0; m < nKernelSize; ++m)
    {
        sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width)
            * tex1Dfetch (texKernel, m);
    }

    pDst[iy * dstStep + ix] = sum * multiplier;
}


// Vertical 1D convolution with mirror border handling.
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
                                                 Ncv32f *pDst,
                                                 NcvSize32u dstSize,
                                                 Ncv32u dstStep,
                                                 NcvRect32u roi,
                                                 Ncv32s nKernelSize,
                                                 Ncv32s nAnchor,
                                                 Ncv32f multiplier)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }

    const int p = nKernelSize - nAnchor - 1;
    const int i = roi.x + ix;
    const int offset = i + roi.y * srcStep;

    float sum = 0.0f;
    for (int m = 0; m < nKernelSize; ++m)
    {
        sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height)
            * tex1Dfetch (texKernel, m);
    }

    pDst[ix + iy * dstStep] = sum * multiplier;
}


// Host launcher for the row filter: validates steps, clamps the ROI to the
// source image, binds the source and kernel textures, and dispatches on the
// requested border mode (only mirror is implemented).
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
                                        NcvSize32u srcSize,
                                        Ncv32u nSrcStep,
                                        Ncv32f *pDst,
                                        NcvSize32u dstSize,
                                        Ncv32u nDstStep,
                                        NcvRect32u oROI,
                                        NppStBorderType borderType,
                                        const Ncv32f *pKernel,
                                        Ncv32s nKernelSize,
                                        Ncv32s nAnchor,
                                        Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);

    // adjust ROI size to be within source image
    if (oROI.x + oROI.width >
srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }

    hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;

    hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));

    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);

    // Only the mirror border mode is implemented.
    switch (borderType)
    {
    case nppStBorderNone:
        return NPPST_ERROR;
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        hipLaunchKernelGGL(( FilterRowBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
            srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }

    return NPPST_SUCCESS;
}


// Host launcher for the column filter; same contract and texture setup as
// the row variant above.
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
                                           NcvSize32u srcSize,
                                           Ncv32u nSrcStep,
                                           Ncv32f *pDst,
                                           NcvSize32u dstSize,
                                           Ncv32u nDstStep,
                                           NcvRect32u oROI,
                                           NppStBorderType borderType,
                                           const Ncv32f *pKernel,
                                           Ncv32s nKernelSize,
                                           Ncv32s nAnchor,
                                           Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR);
    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);

    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }
    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }

    hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;

    hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));

    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);

    switch (borderType)
    {
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        hipLaunchKernelGGL(( FilterColumnBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (),
            srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }

    return NPPST_SUCCESS;
}


//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================


inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
    return (num + denom - 1)/denom;
}


texture<float, 2, hipReadModeElementType> tex_src1;
texture<float, 2, hipReadModeElementType> tex_src0;


// Blend the two source frames (bound to tex_src0/tex_src1) into the frame at
// time `theta`, following the forward/backward flow and coverage masks.
__global__ void BlendFramesKernel(const float *u, const float *v,   // forward flow
                                  const float *ur, const float *vr, // backward flow
                                  const float *o0, const float *o1, // coverage masks
                                  int w, int h, int s,
                                  float theta, float *out)
{
    const int ix = threadIdx.x + blockDim.x * blockIdx.x;
    const int iy = threadIdx.y + blockDim.y * blockIdx.y;

    const int pos = ix + s * iy;

    if (ix >= w || iy >= h) return;

    float _u  = u[pos];
    float _v  = v[pos];

    float _ur = ur[pos];
    float _vr = vr[pos];

    float x = (float)ix + 0.5f;
    float y = (float)iy + 0.5f;
    bool b0 = o0[pos] > 1e-4f;
    bool b1 = o1[pos] > 1e-4f;

    if (b0 && b1)
    {
        // pixel is visible on both frames
        out[pos] = tex2D(tex_src0, x - _u * theta, y
- _v * theta) * (1.0f - theta) +
                   tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta;
    }
    else if (b0)
    {
        // visible on the first frame only
        out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta);
    }
    else
    {
        // visible on the second frame only
        out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta));
    }
}


// Bind both source frames to 2D textures (clamped, bilinear) and launch the
// blending kernel for the intermediate frame at time `theta`.
NCVStatus BlendFrames(const Ncv32f *src0,
                      const Ncv32f *src1,
                      const Ncv32f *ufi,
                      const Ncv32f *vfi,
                      const Ncv32f *ubi,
                      const Ncv32f *vbi,
                      const Ncv32f *o1,
                      const Ncv32f *o2,
                      Ncv32u width,
                      Ncv32u height,
                      Ncv32u stride,
                      Ncv32f theta,
                      Ncv32f *out)
{
    tex_src1.addressMode[0] = hipAddressModeClamp;
    tex_src1.addressMode[1] = hipAddressModeClamp;
    tex_src1.filterMode     = hipFilterModeLinear;
    tex_src1.normalized     = false;

    tex_src0.addressMode[0] = hipAddressModeClamp;
    tex_src0.addressMode[1] = hipAddressModeClamp;
    tex_src0.filterMode     = hipFilterModeLinear;
    tex_src0.normalized     = false;

    hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
    const Ncv32u pitch = stride * sizeof (float);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch),
                         NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch),
                         NPPST_TEXTURE_BIND_ERROR);

    dim3 threads (32, 4);
    dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));

    hipLaunchKernelGGL(( BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (),
        ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// The interpolation scratch requirement equals the vector-warp requirement.
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
                                           Ncv32u nStep,
                                           Ncv32u *hpSize)
{
    NCVStatus status = NPPST_ERROR;
    status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
    return status;
}


// Interpolate a frame at pState->pos between pSrcFrame0 and pSrcFrame1:
// warp the forward and backward flow fields to the target time, then blend
// the two frames guided by the resulting coverage masks.
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
    // check state validity
    ncvAssertReturn (pState->pSrcFrame0 != 0 &&
                     pState->pSrcFrame1 != 0 &&
                     pState->pFU != 0 &&
                     pState->pFV != 0 &&
                     pState->pBU != 0 &&
                     pState->pBV != 0 &&
                     pState->pNewFrame != 0 &&
                     pState->ppBuffers[0] != 0 &&
                     pState->ppBuffers[1] != 0 &&
                     pState->ppBuffers[2] != 0 &&
                     pState->ppBuffers[3] != 0 &&
                     pState->ppBuffers[4] != 0 &&
                     pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (pState->size.width > 0 &&
                     pState->size.height > 0, NPPST_ERROR);
    ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
                     pState->nStep > 0 &&
                     pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v

    // warp flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdU) );
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pFU,
                                        pState->pFV,
                                        pState->nStep,
                                        cov0,
                                        pState->pos,
                                        fwdV) );
    // warp backward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdU) );
    // BUGFIX: this call previously wrote into bwdU a second time (copy-paste
    // of the line above), clobbering the warped backward-u field and leaving
    // bwdV unwritten before BlendFrames consumed it. pBV must warp into bwdV.
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
                                        pState->size,
                                        pState->nStep,
                                        pState->pBU,
                                        pState->pBV,
                                        pState->nStep,
                                        cov1,
                                        1.0f - pState->pos,
                                        bwdV) );
    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0,
                     pState->pSrcFrame1,
                     fwdU,
                     fwdV,
                     bwdU,
                     bwdV,
                     cov0,
                     cov1,
                     pState->size.width,
                     pState->size.height,
                     pState->nStep / sizeof (Ncv32f),
                     pState->pos,
                     pState->pNewFrame) );

    return NPPST_SUCCESS;
}


//==============================================================================
//
// VectorWarpFrame.cu
//
//============================================================================== #if __CUDA_ARCH__ < 200 // FP32 atomic add static __forceinline__ __device__ float _atomicAdd(float *addr, float val) { float old = *addr, assumed; do { assumed = old; old = int_as_float(__iAtomicCAS((int*)addr, float_as_int(assumed), float_as_int(val+assumed))); } while( assumed!=old ); return old; } #else #define _atomicAdd atomicAdd #endif __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); 
_atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 
1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = 
nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); hipLaunchKernelGGL(( MemsetKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), 0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); hipLaunchKernelGGL(( NormalizeKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(), pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, hipReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; 
float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float 
rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcStep, srcROI, pDst, dstSize, 
dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = hipAddressModeMirror; texSrc2D.addressMode[1] = hipAddressModeMirror; texSrc2D.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; }
a80617f436de56d02cc1a55ab59a5d5e9d4adb3a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #include <vector> #include <cuda_runtime.h> #include "NPP_staging.hpp" texture<Ncv8u, 1, cudaReadModeElementType> tex8u; texture<Ncv32u, 1, cudaReadModeElementType> tex32u; texture<uint2, 1, cudaReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static cudaStream_t nppStream = 0; cudaStream_t nppStGetActiveCUDAstream(void) { return nppStream; } cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream) { cudaStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return 
s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline 
__device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, 
shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { cudaChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = cudaCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } scanRows <T_in, T_out, tbDoSqr> <<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>> (d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || 
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, 
dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, 
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } 
//==============================================================================
//
// Decimate.cu
//
//==============================================================================


const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32;
const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8;


// Fetch a source element either through the 1D texture path (cached) or via a
// plain global-memory load, selected at compile time by tbCacheTexture.
template<class T, NcvBool tbCacheTexture>
__device__ T getElem_Decimate(Ncv32u x, T *d_src);

template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src)
{
    return tex1Dfetch(tex32u, x);
}

template<>
__device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src)
{
    return d_src[x];
}

template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src)
{
    // 64-bit loads go through a uint2 texture; reassemble the two 32-bit halves.
    uint2 tmp = tex1Dfetch(tex64u, x);
    Ncv64u res = (Ncv64u)tmp.y;
    res <<= 32;
    res |= tmp.x;
    return res;
}

template<>
__device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src)
{
    return d_src[x];
}


// Nearest-neighbor decimation kernel: each destination pixel copies the source
// pixel at (curY*scale, curX*scale). One thread per destination pixel, with a
// bounds guard for partial tiles.
template <class T, NcvBool tbCacheTexture>
__global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep,
                             NcvSize32u dstRoi, Ncv32u scale)
{
    int curX = blockIdx.x * blockDim.x + threadIdx.x;
    int curY = blockIdx.y * blockDim.y + threadIdx.y;

    if (curX >= dstRoi.width || curY >= dstRoi.height)
    {
        return;
    }

    d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src);
}


// Device decimation wrapper: validates arguments, optionally binds the source
// to a texture, and launches decimate_C1R on the active NPP-staging stream.
template <class T>
static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep,
                                       T *d_dst, Ncv32u dstStep,
                                       NcvSize32u srcRoi, Ncv32u scale,
                                       NcvBool readThruTexture)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
    ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
                    dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP);
    srcStep /= sizeof(T);
    dstStep /= sizeof(T);

    // Destination ROI shrinks by the (integer) scale factor.
    NcvSize32u dstRoi;
    dstRoi.width = srcRoi.width / scale;
    dstRoi.height = srcRoi.height / scale;

    dim3 grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X,
              (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y);
    dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y);

    if (!readThruTexture)
    {
        decimate_C1R <T, false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale);
    }
    else
    {
        // Texture path: 32-bit elements bind to tex32u, anything else (64-bit)
        // binds to tex64u via uint2.
        cudaChannelFormatDesc cfdTexSrc;

        if (sizeof(T) == sizeof(Ncv32u))
        {
            cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();

            size_t alignmentOffset;
            ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
            ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        }
        else
        {
            cfdTexSrc = cudaCreateChannelDesc<uint2>();

            size_t alignmentOffset;
            ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR);
            ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        }

        decimate_C1R <T, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale);
    }

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// Host reference decimation: plain nearest-neighbor subsampling loop.
template <class T>
static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep,
                                     T *h_dst, Ncv32u dstStep,
                                     NcvSize32u srcRoi, Ncv32u scale)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI);
    ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE);
    ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) &&
                    dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) &&
                    srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP);
    srcStep /= sizeof(T);
    dstStep /= sizeof(T);

    NcvSize32u dstRoi;
    dstRoi.width = srcRoi.width / scale;
    dstRoi.height = srcRoi.height / scale;

    for (Ncv32u i=0; i<dstRoi.height; i++)
    {
        for (Ncv32u j=0; j<dstRoi.width; j++)
        {
            h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale];
        }
    }

    return NPPST_SUCCESS;
}


// Public entry points: all typed variants reinterpret to the unsigned type of
// the same width and share one wrapper implementation.
#define implementNppDecimate(bit, typ) \
    NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
                                              Ncv##bit##typ *d_dst, Ncv32u dstStep, \
                                              NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \
    { \
        return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
                                                  (Ncv##bit##u *)d_dst, dstStep, \
                                                  srcRoi, scale, readThruTexture); \
    }


#define implementNppDecimateHost(bit, typ) \
    NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
                                                   Ncv##bit##typ *h_dst, Ncv32u dstStep, \
                                                   NcvSize32u srcRoi, Ncv32u scale) \
    { \
        return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
                                                (Ncv##bit##u *)h_dst, dstStep, \
                                                srcRoi, scale); \
    }


implementNppDecimate(32, u)
implementNppDecimate(32, s)
implementNppDecimate(32, f)
implementNppDecimate(64, u)
implementNppDecimate(64, s)
implementNppDecimate(64, f)
implementNppDecimateHost(32, u)
implementNppDecimateHost(32, s)
implementNppDecimateHost(32, f)
implementNppDecimateHost(64, u)
implementNppDecimateHost(64, s)
implementNppDecimateHost(64, f)


//==============================================================================
//
// RectStdDev.cu
//
//==============================================================================


const Ncv32u NUM_RECTSTDDEV_THREADS = 128;


// Read one integral-image (sum) element, through the texture cache or directly.
template <NcvBool tbCacheTexture>
__device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum)
{
    if (tbCacheTexture)
    {
        return tex1Dfetch(tex32u, x);
    }
    else
    {
        return d_sum[x];
    }
}


// Read one squared-integral-image element; the texture path reassembles the
// 64-bit value from a uint2 fetch.
template <NcvBool tbCacheTexture>
__device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum)
{
    if (tbCacheTexture)
    {
        uint2 tmp = tex1Dfetch(tex64u, x);
        Ncv64u res = (Ncv64u)tmp.y;
        res <<= 32;
        res |= tmp.x;
        return res;
    }
    else
    {
        return d_sqsum[x];
    }
}


// Per-pixel standard deviation over a sliding rectangle, computed from the
// integral and squared-integral images. (Signature continues on the next
// source line.)
template <NcvBool tbCacheTexture>
__global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
    // One thread per output column within the row indexed by blockIdx.y.
    Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
    if (x_offs >= roi.width)
    {
        return;
    }

    Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
    Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;

    // Rectangle sum from the integral image: br + tl - tr - bl.
    //OPT: try swapping order (could change cache hit/miss ratio)
    Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
    Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
    Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;

    // Same four-corner lookup on the squared-integral image.
    Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
    sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
    sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
    sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
    sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
    Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;

    Ncv32f mean = sum_val * invRectArea;

    //////////////////////////////////////////////////////////////////////////
    // sqsum_val_res = sqsum_val / rectArea
    // Split the 64-bit sum into a float-representable head (round-toward-zero)
    // and a small remainder, scale each, then recombine — this recovers
    // precision a single u64->f32 conversion would lose.
    //////////////////////////////////////////////////////////////////////////

    Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
    Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
    Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
    Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
    sqsum_val_1 *= invRectArea;
    sqsum_val_4 *= invRectArea;
    Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;

    //////////////////////////////////////////////////////////////////////////
    // variance = sqsum_val_res - mean * mean
    //////////////////////////////////////////////////////////////////////////

#if defined DISABLE_MAD_SELECTIVELY
    // NOTE(review): this branch uses sqsum_val_2 (the u64 head, not divided by
    // rectArea) — looks inconsistent with the #else branch; confirm upstream.
    Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean);
#else
    Ncv32f variance = sqsum_val_res - mean * mean;
#endif

    //////////////////////////////////////////////////////////////////////////
    // stddev = sqrtf(variance)
    //////////////////////////////////////////////////////////////////////////

    //Ncv32f stddev = sqrtf(variance);
    Ncv32f stddev = __fsqrt_rn(variance);

    d_norm[blockIdx.y * normStep + x_offs] = stddev;
}


// Device wrapper: validates layouts, precomputes 1/(rect area), and launches
// rectStdDev_32f_C1R either on raw pointers or through bound textures.
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect,
                                   Ncv32f scaleArea, NcvBool readThruTexture)
{
    ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);

    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;

    dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
    dim3 block(NUM_RECTSTDDEV_THREADS);

    if (!readThruTexture)
    {
        rectStdDev_32f_C1R <false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }
    else
    {
        cudaChannelFormatDesc cfdTexSrc;
        cudaChannelFormatDesc cfdTexSqr;

        cfdTexSrc = cudaCreateChannelDesc<Ncv32u>();
        cfdTexSqr = cudaCreateChannelDesc<uint2>();

        size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);
        ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR);
        ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR);

        // Texture path: the kernel receives NULL raw pointers and reads only
        // through the textures bound above.
        rectStdDev_32f_C1R <true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea);
    }

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// Host reference implementation: per-pixel rectangle mean/variance from the
// precomputed integral images, accumulated in double precision.
NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep,
                                        Ncv64u *h_sqsum, Ncv32u sqsumStep,
                                        Ncv32f *h_norm, Ncv32u normStep,
                                        NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea)
{
    ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);

    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;

    for (Ncv32u i=0; i<roi.height; i++)
    {
        for (Ncv32u j=0; j<roi.width; j++)
        {
            Ncv32u sum_offset = i * sumStep + j;
            Ncv32u sqsum_offset = i * sqsumStep + j;

            // Four-corner rectangle sums, as in the device kernel.
            Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x];
            Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x];
            Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width];
            Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width];
            Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl;

            Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x];
            Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x];
            Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width];
            Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width];
            Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl);

            Ncv64f mean = sum_val * invRectArea;
            Ncv64f sqsum_val_2 = sqsum_val / rectArea;
            Ncv64f variance = sqsum_val_2 - mean * mean;

            h_norm[i * normStep + j] = (Ncv32f)sqrt(variance);
        }
    }

    return NPPST_SUCCESS;
}


//==============================================================================
//
// Transpose.cu
//
//==============================================================================


const Ncv32u TRANSPOSE_TILE_DIM = 16;
const Ncv32u TRANSPOSE_BLOCK_ROWS = 16;


/**
 * \brief Matrix transpose kernel
 *
 * Calculates transpose of the input image
 * \see TRANSPOSE_TILE_DIM
 *
 * Uses a padded shared-memory tile and diagonal block reordering (to spread
 * partition-camping across memory partitions).
 *
 * \tparam T_in Type of input image elements
 * \tparam T_out Type of output image elements
 *
 * \param d_src [IN] Source image pointer
 * \param srcStride [IN] Source image stride
 * \param d_dst [OUT] Output image pointer
 * \param dstStride [IN] Output image stride
 *
 * \return None
 */
template <class T>
__global__ void transpose(T *d_src, Ncv32u srcStride,
                          T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    // +1 column of padding avoids shared-memory bank conflicts on the
    // transposed (column-wise) reads.
    __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1];

    Ncv32u blockIdx_x, blockIdx_y;

    // do diagonal reordering
    if (gridDim.x == gridDim.y)
    {
        blockIdx_y = blockIdx.x;
        blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
    }
    else
    {
        Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y;
        blockIdx_y = bid % gridDim.y;
        blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
    }

    Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x;
Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y;
    Ncv32u index_gmem = xIndex + yIndex * srcStride;

    // Stage a tile of the source into shared memory (guarded for edge tiles).
    if (xIndex < srcRoi.width)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.height)
            {
                tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride];
            }
        }
    }

    __syncthreads();

    // Write the tile back transposed: swap block roles and read tile columns.
    xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x;
    yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y;
    index_gmem = xIndex + yIndex * dstStride;

    if (xIndex < srcRoi.height)
    {
        for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS)
        {
            if (yIndex + i < srcRoi.width)
            {
                d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i];
            }
        }
    }
}


// Device transpose wrapper: validates strides (given in bytes), converts them
// to elements, and launches the tiled transpose kernel.
template <class T>
NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride,
                                 T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM,
              (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM);
    dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM);
    transpose <T> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStride, d_dst, dstStride, srcRoi);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// Host reference transpose: simple element-wise loop.
template <class T>
static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride,
                                      T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) &&
                    dstStride >= srcRoi.height * sizeof(T) &&
                    srcStride % sizeof(T) == 0 &&
                    dstStride % sizeof(T) == 0, NPPST_INVALID_STEP);
    srcStride /= sizeof(T);
    dstStride /= sizeof(T);

    for (Ncv32u i=0; i<srcRoi.height; i++)
    {
        for (Ncv32u j=0; j<srcRoi.width; j++)
        {
            h_dst[j*dstStride+i] = h_src[i*srcStride + j];
        }
    }

    return NPPST_SUCCESS;
}


// Typed public entry points share the unsigned wrapper of matching width.
#define implementNppTranspose(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \
                                               Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \
    { \
        return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \
                                                   (Ncv##bit##u *)d_dst, dstStep, srcRoi); \
    }


#define implementNppTransposeHost(bit, typ) \
    NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \
                                                    Ncv##bit##typ *h_dst, Ncv32u dstStep, \
                                                    NcvSize32u srcRoi) \
    { \
        return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \
                                                 (Ncv##bit##u *)h_dst, dstStep, srcRoi); \
    }


implementNppTranspose(32,u)
implementNppTranspose(32,s)
implementNppTranspose(32,f)
implementNppTranspose(64,u)
implementNppTranspose(64,s)
implementNppTranspose(64,f)

implementNppTransposeHost(32,u)
implementNppTransposeHost(32,s)
implementNppTransposeHost(32,f)
implementNppTransposeHost(64,u)
implementNppTransposeHost(64,s)
implementNppTransposeHost(64,f)


// 128-bit elements are transposed as uint4.
NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep,
                                  void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}


NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep,
                                       void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi)
{
    return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi);
}


//==============================================================================
//
// Compact.cu
//
//==============================================================================


const Ncv32u NUM_REMOVE_THREADS = 256;


// Pass 1 of stream compaction: per-block inclusive scan. With bRemove the scan
// input is the keep-predicate (element != elemRemove); otherwise the data
// itself is scanned (used for the hierarchical partial-sum levels). With
// bWritePartial the last thread publishes the block total. (Signature
// continues on the next source line.)
template <bool bRemove, bool bWritePartial>
__global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u
*d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
    if (elemAddrIn > srcLen + blockDim.x)
    {
        return;
    }

    __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2];

    Ncv32u scanElem = 0;
    if (elemAddrIn < srcLen)
    {
        if (bRemove)
        {
            // Scan the keep-predicate, not the value itself.
            scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0;
        }
        else
        {
            scanElem = d_src[elemAddrIn];
        }
    }

    Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem);
    __syncthreads();

    if (elemAddrIn < srcLen)
    {
        if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial)
        {
            d_blockSums[blockId] = localScanInc;
        }

        if (bRemove)
        {
            // Store the exclusive scan value (inclusive minus own element).
            d_offsets[elemAddrIn] = localScanInc - scanElem;
        }
        else
        {
            d_src[elemAddrIn] = localScanInc - scanElem;
        }
    }
}


// Pass 2: add each block's accumulated base offset to its local offsets.
__global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
    if (elemAddrIn >= srcLen)
    {
        return;
    }

    __shared__ Ncv32u valOffs;
    valOffs = d_blockSums[blockId];
    __syncthreads();

    d_offsets[elemAddrIn] += valOffs;
}


// Pass 3: scatter surviving elements to their scanned offsets; the thread
// handling the last input element also writes the final output length.
__global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen,
                                   Ncv32u *d_offsets, Ncv32u *d_dst,
                                   Ncv32u elemRemove, Ncv32u *dstLenValue)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x;
    if (elemAddrIn >= srcLen)
    {
        return;
    }

    Ncv32u elem = d_src[elemAddrIn];
    Ncv32u elemAddrOut = d_offsets[elemAddrIn];
    if (elem != elemRemove)
    {
        d_dst[elemAddrOut] = elem;
    }

    if (elemAddrIn == srcLen-1)
    {
        if (elem != elemRemove)
        {
            *dstLenValue = elemAddrOut + 1;
        }
        else
        {
            *dstLenValue = elemAddrOut;
        }
    }
}


// Removes all occurrences of elemRemove from d_src into d_dst using a
// hierarchical scan (scan blocks, scan the block sums recursively, adjust,
// then scatter). Also usable in counting mode to size the scratch buffer.
NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
                                   Ncv32u *d_dst, Ncv32u *dstLenPinned,
                                   Ncv32u elemRemove,
                                   INCVMemAllocator &gpuAllocator)
{
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);
    ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(),
                    NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        if (dstLenPinned != NULL)
        {
            *dstLenPinned = 0;
        }
        return NPPST_SUCCESS;
    }

    // Precompute the size and aligned offset of every partial-sum level.
    std::vector<Ncv32u> partSumNums;
    std::vector<Ncv32u> partSumOffsets;
    Ncv32u partSumLastNum = srcLen;
    Ncv32u partSumLastOffs = 0;
    do
    {
        partSumNums.push_back(partSumLastNum);
        partSumOffsets.push_back(partSumLastOffs);

        Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u),
                                                 gpuAllocator.alignment()) / sizeof(Ncv32u);
        partSumLastOffs += curPartSumAlignedLength;

        partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS;
    }
    while (partSumLastNum>1);
    partSumNums.push_back(partSumLastNum);
    partSumOffsets.push_back(partSumLastOffs);

    NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);
    NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1);
    ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR);

    NCV_SET_SKIP_COND(gpuAllocator.isCounting());
    NCV_SKIP_COND_BEGIN

    dim3 block(NUM_REMOVE_THREADS);

    //calculate zero-level partial sums for indices calculation
    if (partSumNums.size() > 2)
    {
        dim3 grid(partSumNums[1]);

        if (grid.x > 65535)
        {
            grid.y = (grid.x + 65534) / 65535;
            grid.x = 65535;
        }
        removePass1Scan <true, true>
            <<<grid, block, 0, nppStGetActiveCUDAstream()>>>
            (d_src, srcLen,
             d_hierSums.ptr(),
             d_hierSums.ptr() + partSumOffsets[1],
             elemRemove);

        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

        //calculate hierarchical partial sums
        for (Ncv32u i=1; i<partSumNums.size()-1; i++)
        {
            dim3 grid_partial(partSumNums[i+1]);
            if (grid_partial.x > 65535)
            {
                grid_partial.y = (grid_partial.x + 65534) / 65535;
                grid_partial.x = 65535;
            }
            if (grid_partial.x != 1)
            {
                removePass1Scan <false, true>
                    <<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
                    (d_hierSums.ptr() + partSumOffsets[i],
                     partSumNums[i], NULL,
                     d_hierSums.ptr() + partSumOffsets[i+1],
                     NULL);
            }
            else
ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    // Dry-run the compaction with a counting allocator; the sentinel removal
    // value is arbitrary since nothing is executed in counting mode.
    NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 0xC001C0DE,
                                                 gpuCounter);
    ncvAssertReturnNcvStat(ncvStat);

    *pBufsize = (Ncv32u)gpuCounter.maxSize();
    return NPPST_SUCCESS;
}


// 32s/32f buffer sizes are identical to the 32u case (same element width).
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


// Device compaction entry point: wraps the caller's scratch buffer in a stack
// allocator and runs the real implementation.
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                            Ncv32u *d_dst, Ncv32u *p_dstLen,
                            Ncv32u elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize,
                                      static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove,
                                                 gpuAllocator);
    ncvAssertReturnNcvStat(ncvStat);

    return NPPST_SUCCESS;
}


// Signed/float variants reinterpret the bit pattern and reuse the 32u path.
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
                            Ncv32s *d_dst, Ncv32u *p_dstLen,
                            Ncv32s elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}


NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
                            Ncv32f *d_dst, Ncv32u *p_dstLen,
                            Ncv32f elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, cudaDeviceProp &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp);
}


// Host reference compaction: stable single-pass filter.
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
                                 Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        if (dstLen != NULL)
        {
            *dstLen = 0;
        }
        return NPPST_SUCCESS;
    }

    Ncv32u dstIndex = 0;
    for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++)
    {
        if (h_src[srcIndex] != elemRemove)
        {
            h_dst[dstIndex++] = h_src[srcIndex];
        }
    }

    if (dstLen != NULL)
    {
        *dstLen = dstIndex;
    }

    return NPPST_SUCCESS;
}


NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
                                 Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen,
                                  *(Ncv32u *)&elemRemove);
}


NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
                                 Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen,
                                  *(Ncv32u *)&elemRemove);
}


//==============================================================================
//
// Filter.cu
//
//==============================================================================


texture <float, 1, cudaReadModeElementType> texSrc;
texture <float, 1, cudaReadModeElementType> texKernel;


// Mirror-border fetch along a row: indices outside [0, w) are reflected.
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
                                                   int i,
                                                   int w)
{
    if (i < 0) i = 1 - i;
    if (i >= w) i = w + w - i - 1;
    return tex1Dfetch (texSrc, rowOffset + i);
}


// Mirror-border fetch along a column: indices outside [0, h) are reflected.
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
                                                      const int rowStep,
                                                      int j,
                                                      int h)
{
    if (j < 0) j = 1 - j;
    if (j >= h) j = h + h - j - 1;
    return tex1Dfetch (texSrc, offset + j * rowStep);
}


// Horizontal 1D convolution with mirrored borders; source and kernel are read
// through the textures bound by the host wrapper.
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
                                              Ncv32f *pDst,
                                              NcvSize32u dstSize,
                                              Ncv32u dstStep,
                                              NcvRect32u roi,
                                              Ncv32s nKernelSize,
                                              Ncv32s nAnchor,
                                              Ncv32f multiplier)
{
    // position within ROI
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }

    const int p = nKernelSize - nAnchor - 1;
    const int j = roi.y + iy;
    const int rowOffset = j * srcStep + roi.x;

    float sum = 0.0f;
    for (int m = 0; m < nKernelSize; ++m)
    {
        sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width)
            * tex1Dfetch (texKernel, m);
    }

    pDst[iy * dstStep + ix] = sum * multiplier;
}


// Vertical 1D convolution with mirrored borders, texture-sourced like the row
// variant above.
__global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep,
                                                 Ncv32f *pDst,
                                                 NcvSize32u dstSize,
                                                 Ncv32u dstStep,
                                                 NcvRect32u roi,
                                                 Ncv32s nKernelSize,
                                                 Ncv32s nAnchor,
                                                 Ncv32f multiplier)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }

    const int p = nKernelSize - nAnchor - 1;
    const int i = roi.x + ix;
    const int offset = i + roi.y * srcStep;

    float sum = 0.0f;
    for (int m = 0; m < nKernelSize; ++m)
    {
        sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height)
            * tex1Dfetch (texKernel, m);
    }

    pDst[ix + iy * dstStep] = sum * multiplier;
}


// Row-filter host wrapper: binds source and kernel textures, clamps the ROI to
// the source image, and dispatches on border type (only mirror is supported).
NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc,
                                        NcvSize32u srcSize,
                                        Ncv32u nSrcStep,
                                        Ncv32f *pDst,
                                        NcvSize32u dstSize,
                                        Ncv32u nDstStep,
                                        NcvRect32u oROI,
                                        NppStBorderType borderType,
                                        const Ncv32f *pKernel,
                                        Ncv32s nKernelSize,
                                        Ncv32s nAnchor,
                                        Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR);

    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);

    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);

    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }

    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }

    cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;

    // NOTE(review): these cudaBindTexture results are unchecked, unlike the
    // ncvAssertCUDAReturn-wrapped binds elsewhere in this file — confirm intent.
    cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));

    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);

    switch (borderType)
    {
    case nppStBorderNone:
        return NPPST_ERROR;
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }

    return NPPST_SUCCESS;
}


// Column-filter host wrapper; mirrors the row wrapper above.
NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc,
                                           NcvSize32u srcSize,
                                           Ncv32u nSrcStep,
                                           Ncv32f *pDst,
                                           NcvSize32u dstSize,
                                           Ncv32u nDstStep,
                                           NcvRect32u oROI,
                                           NppStBorderType borderType,
                                           const Ncv32f *pKernel,
                                           Ncv32s nKernelSize,
                                           Ncv32s nAnchor,
                                           Ncv32f multiplier)
{
    ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR);

    ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI);

    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
                     dstSize.width * sizeof (Ncv32f) <= nDstStep &&
                     oROI.width * sizeof (Ncv32f) <= nSrcStep &&
                     oROI.width * sizeof (Ncv32f) <= nDstStep &&
                     nSrcStep % sizeof (Ncv32f) == 0 &&
                     nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u dstStep = nDstStep / sizeof (Ncv32f);

    // adjust ROI size to be within source image
    if (oROI.x + oROI.width > srcSize.width)
    {
        oROI.width = srcSize.width - oROI.x;
    }

    if (oROI.y + oROI.height > srcSize.height)
    {
        oROI.height = srcSize.height - oROI.y;
    }

    cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> ();
    texSrc.normalized = false;
    texKernel.normalized = false;

    cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep);
    cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f));

    dim3 ctaSize (32, 6);
    dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x,
                   (oROI.height + ctaSize.y - 1) / ctaSize.y);

    switch
(borderType)
    {
    // NOTE(review): unlike the row wrapper, there is no explicit
    // nppStBorderNone case here; it falls through to default (same result).
    case nppStBorderClamp:
        return NPPST_ERROR;
    case nppStBorderWrap:
        return NPPST_ERROR;
    case nppStBorderMirror:
        FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>>
            (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier);
        ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);
        break;
    default:
        return NPPST_ERROR;
    }

    return NPPST_SUCCESS;
}


//==============================================================================
//
// FrameInterpolate.cu
//
//==============================================================================


// Ceiling division helper.
inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom)
{
    return (num + denom - 1)/denom;
}


texture<float, 2, cudaReadModeElementType> tex_src1;
texture<float, 2, cudaReadModeElementType> tex_src0;


// Blend an intermediate frame at time theta between two source frames using
// forward/backward optical flow and per-pixel coverage masks. Pixels visible in
// both frames are cross-faded along their flow vectors; pixels visible in only
// one frame are warped from that frame alone.
__global__ void BlendFramesKernel(const float *u, const float *v,   // forward flow
                                  const float *ur, const float *vr, // backward flow
                                  const float *o0, const float *o1, // coverage masks
                                  int w, int h, int s,
                                  float theta, float *out)
{
    const int ix = threadIdx.x + blockDim.x * blockIdx.x;
    const int iy = threadIdx.y + blockDim.y * blockIdx.y;

    const int pos = ix + s * iy;

    if (ix >= w || iy >= h) return;

    float _u = u[pos];
    float _v = v[pos];

    float _ur = ur[pos];
    float _vr = vr[pos];

    float x = (float)ix + 0.5f;
    float y = (float)iy + 0.5f;
    bool b0 = o0[pos] > 1e-4f;
    bool b1 = o1[pos] > 1e-4f;

    if (b0 && b1)
    {
        // pixel is visible on both frames
        out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) +
            tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta;
    }
    else if (b0)
    {
        // visible on the first frame only
        out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta);
    }
    else
    {
        // visible on the second frame only
        out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta));
    }
}


// (Signature continues on the next source line.)
NCVStatus BlendFrames(const Ncv32f *src0,
                      const Ncv32f *src1,
                      const Ncv32f *ufi,
                      const Ncv32f *vfi,
                      const Ncv32f *ubi,
                      const Ncv32f *vbi,
                      const Ncv32f *o1,
                      const
Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = cudaAddressModeClamp; tex_src1.addressMode[1] = cudaAddressModeClamp; tex_src1.filterMode = cudaFilterModeLinear; tex_src1.normalized = false; tex_src0.addressMode[0] = cudaAddressModeClamp; tex_src0.addressMode[1] = cudaAddressModeClamp; tex_src0.filterMode = cudaFilterModeLinear; tex_src0.normalized = false; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); dim3 threads (32, 4); dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y)); BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>> (ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize, Ncv32u nStep, Ncv32u *hpSize) { NCVStatus status = NPPST_ERROR; status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize); return status; } NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState) { // check state validity ncvAssertReturn (pState->pSrcFrame0 != 0 && pState->pSrcFrame1 != 0 && pState->pFU != 0 && pState->pFV != 0 && pState->pBU != 0 && pState->pBV != 0 && pState->pNewFrame != 0 && pState->ppBuffers[0] != 0 && pState->ppBuffers[1] != 0 && pState->ppBuffers[2] != 0 && pState->ppBuffers[3] != 0 && pState->ppBuffers[4] != 0 && pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR); ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) && pState->nStep > 0 && pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); // 
change notation Ncv32f *cov0 = pState->ppBuffers[0]; Ncv32f *cov1 = pState->ppBuffers[1]; Ncv32f *fwdU = pState->ppBuffers[2]; // forward u Ncv32f *fwdV = pState->ppBuffers[3]; // forward v Ncv32f *bwdU = pState->ppBuffers[4]; // backward u Ncv32f *bwdV = pState->ppBuffers[5]; // backward v // warp flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdV) ); // warp backward flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); // interpolate frame ncvAssertReturnNcvStat ( BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1, fwdU, fwdV, bwdU, bwdV, cov0, cov1, pState->size.width, pState->size.height, pState->nStep / sizeof (Ncv32f), pState->pos, pState->pNewFrame) ); return NPPST_SUCCESS; } //============================================================================== // // VectorWarpFrame.cu // //============================================================================== #if __CUDA_ARCH__ < 200 // FP32 atomic add static __forceinline__ __device__ float _atomicAdd(float *addr, float val) { float old = *addr, assumed; do { assumed = old; old = int_as_float(__iAtomicCAS((int*)addr, float_as_int(assumed), float_as_int(val+assumed))); } while( assumed!=old ); return old; } #else #define _atomicAdd atomicAdd #endif __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float 
time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = 
threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { _atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u 
srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, cudaReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int 
spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = floorf (xBegin); float ceilXEnd = ceilf (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = floorf (yBegin); float ceilYEnd = ceilf (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - 
yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (ceilf (x - 2.0f), 0.0f); float xmax = fmin (floorf (x + 2.0f), rw - 1.0f); float ymin = fmax (ceilf (y - 2.0f), 0.0f); float ymax = fmin (floorf (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 
0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = cudaAddressModeMirror; texSrc2D.addressMode[1] = cudaAddressModeMirror; texSrc2D.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; }
795bb0fd878622d51ff21271a07d9da21c9e4823.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"

#define N 10

// Element-wise vector addition: c[i] = a[i] + b[i].
// Launched with N blocks of 1 thread each; blockIdx.x selects the element.
__global__ void add(int *a, int *b, int *c)
{
    int tID = blockIdx.x;
    if (tID < N)  // guard against any extra blocks
    {
        c[tID] = a[tID] + b[tID];
    }
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    hipMalloc((void **)&dev_a, N * sizeof(int));
    hipMalloc((void **)&dev_b, N * sizeof(int));
    hipMalloc((void **)&dev_c, N * sizeof(int));

    // Fill Arrays
    for (int i = 0; i < N; i++)
    {
        a[i] = i, b[i] = 1;
    }

    hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);

    // Blocking copy: implicitly synchronizes with the kernel launch above.
    hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Fix: release the device allocations (the original leaked all three).
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);

    return 0;
}
795bb0fd878622d51ff21271a07d9da21c9e4823.cu
#include "stdio.h"

#define N 10

// Element-wise vector addition: c[i] = a[i] + b[i].
// Launched with N blocks of 1 thread each; blockIdx.x selects the element.
__global__ void add(int *a, int *b, int *c)
{
    int tID = blockIdx.x;
    if (tID < N)  // guard against any extra blocks
    {
        c[tID] = a[tID] + b[tID];
    }
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    cudaMalloc((void **)&dev_a, N * sizeof(int));
    cudaMalloc((void **)&dev_b, N * sizeof(int));
    cudaMalloc((void **)&dev_c, N * sizeof(int));

    // Fill Arrays
    for (int i = 0; i < N; i++)
    {
        a[i] = i, b[i] = 1;
    }

    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    add<<<N, 1>>>(dev_a, dev_b, dev_c);

    // Blocking copy: implicitly synchronizes with the kernel launch above.
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Fix: release the device allocations (the original leaked all three).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    return 0;
}
6e336f93b58e091d524764d84da58585361d7dbf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Applies east/west boundary conditions in place to the D2Q9
// lattice-Boltzmann distribution array f0 (9 doubles per lattice node,
// f0[idx*9 + d]). One thread handles one lattice node; only nodes on the
// west edge (idx % LX == 0) and east edge ((idx+1) % LX == 0) are modified.
// The unknown incoming distributions (1,5,8 on the west; 3,7,6 on the east)
// are reconstructed from the known ones, Zou/He style.
// NOTE(review): the direction numbering (1=+x, 3=-x, 2/4 = +/-y,
// 5/6/7/8 = diagonals) is assumed from the formulas -- confirm against the
// streaming kernel that fills f0.
__global__ void bcs_fluid(double *f0, mystruct *param )
{
	int idx=threadIdx.x + blockIdx.x * blockDim.x ;
	double c, u ;
	if(idx <param->N)
	{
		// Pressure (fixed-density) boundary conditions, enabled by flag.
		if(param->pressure_bcs_ew==1 && idx%param->LX==0 ) //WEST
		{
			// Inlet velocity implied by prescribed inlet density rho0_in.
			u = 1. //rho0_in
				- ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 3) + *(f0 + idx*9 + 7) + *(f0 + idx*9 +6) ) )/param->rho0_in;
			c = u*param->rho0_in;  // momentum flux rho*u used below
			// Reconstruct the three unknown east-moving distributions.
			*(f0 + idx*9 + 1) = *(f0 + idx*9+ 3) + (2./3.)*c;
			*(f0 + idx*9 + 5) = *(f0 + idx*9+ 7) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) + (1./6.)*c;
			*(f0 + idx*9 + 8) = *(f0 + idx*9+ 6) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) + (1./6.)*c;
		}
		if( param->pressure_bcs_ew==1 && (idx+1)%param->LX==0) //EAST
		{
			// Outlet velocity implied by prescribed outlet density rho0_out.
			u = -1. //rho0_in
				+ ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 1) + *(f0 + idx*9 + 5) + *(f0 + idx*9 +8) ) )/param->rho0_out;
			c = u*param->rho0_out;
			// Reconstruct the three unknown west-moving distributions.
			*(f0 + idx*9 + 3) = *(f0 + idx*9+ 1) - (2./3.)*c;
			*(f0 + idx*9 + 7) = *(f0 + idx*9+ 5) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) - (1./6.)*c;
			*(f0 + idx*9 + 6) = *(f0 + idx*9+ 8) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) - (1./6.)*c;
		}
// Compile-time alternative: fixed-velocity (Zou/He velocity) boundaries.
// NOTE(review): if VELOCITY_BCS_EW is ever defined, this branch redeclares
// idx and c (already declared above in the same scope) and would not
// compile as written; `a`, ux_in and ux_out are partly unused. Dead code
// preserved verbatim.
# if VELOCITY_BCS_EW
		int idx=threadIdx.x + blockIdx.x * blockDim.x, a ;
		double ux_in, ux_out, c, rho;
		if(idx%param->LX==0) //WEST
		{
			ux_in=param->ux_in;
			// Density implied by prescribed inlet x-velocity ux_in.
			rho = ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 3) + *(f0 + idx*9 + 7) + *(f0 + idx*9 +6) ) )/(1.-ux_in);
			c = ux_in*rho;
			*(f0 + idx*9 + 1) = *(f0 + idx*9+ 3) + (2./3.)*c;
			*(f0 + idx*9 + 5) = *(f0 + idx*9+ 7) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) + (1./6.)*c;
			*(f0 + idx*9 + 8) = *(f0 + idx*9+ 6) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) + (1./6.)*c;
		}
		if( (idx+1)%param->LX==0) //EAST
		{
			ux_out=param->ux_out;
			// Density implied by prescribed outlet x-velocity ux_out.
			rho = ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 1) + *(f0 + idx*9 + 5) + *(f0 + idx*9 +8) ) )/(1.+ux_out);
			c = ux_out*rho;
			*(f0 + idx*9 + 3) = *(f0 + idx*9+ 1) - (2./3.)*c;
			*(f0 + idx*9 + 7) = *(f0 + idx*9+ 5) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) - (1./6.)*c;
			*(f0 + idx*9 + 6) = *(f0 + idx*9+ 8) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) - (1./6.)*c;
		}
#endif
	}
}
6e336f93b58e091d524764d84da58585361d7dbf.cu
// Applies east/west boundary conditions in place to the D2Q9
// lattice-Boltzmann distribution array f0 (9 doubles per lattice node,
// f0[idx*9 + d]). One thread handles one lattice node; only nodes on the
// west edge (idx % LX == 0) and east edge ((idx+1) % LX == 0) are modified.
// The unknown incoming distributions (1,5,8 on the west; 3,7,6 on the east)
// are reconstructed from the known ones, Zou/He style.
// NOTE(review): the direction numbering (1=+x, 3=-x, 2/4 = +/-y,
// 5/6/7/8 = diagonals) is assumed from the formulas -- confirm against the
// streaming kernel that fills f0.
__global__ void bcs_fluid(double *f0, mystruct *param )
{
	int idx=threadIdx.x + blockIdx.x * blockDim.x ;
	double c, u ;
	if(idx <param->N)
	{
		// Pressure (fixed-density) boundary conditions, enabled by flag.
		if(param->pressure_bcs_ew==1 && idx%param->LX==0 ) //WEST
		{
			// Inlet velocity implied by prescribed inlet density rho0_in.
			u = 1. //rho0_in
				- ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 3) + *(f0 + idx*9 + 7) + *(f0 + idx*9 +6) ) )/param->rho0_in;
			c = u*param->rho0_in;  // momentum flux rho*u used below
			// Reconstruct the three unknown east-moving distributions.
			*(f0 + idx*9 + 1) = *(f0 + idx*9+ 3) + (2./3.)*c;
			*(f0 + idx*9 + 5) = *(f0 + idx*9+ 7) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) + (1./6.)*c;
			*(f0 + idx*9 + 8) = *(f0 + idx*9+ 6) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) + (1./6.)*c;
		}
		if( param->pressure_bcs_ew==1 && (idx+1)%param->LX==0) //EAST
		{
			// Outlet velocity implied by prescribed outlet density rho0_out.
			u = -1. //rho0_in
				+ ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 1) + *(f0 + idx*9 + 5) + *(f0 + idx*9 +8) ) )/param->rho0_out;
			c = u*param->rho0_out;
			// Reconstruct the three unknown west-moving distributions.
			*(f0 + idx*9 + 3) = *(f0 + idx*9+ 1) - (2./3.)*c;
			*(f0 + idx*9 + 7) = *(f0 + idx*9+ 5) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) - (1./6.)*c;
			*(f0 + idx*9 + 6) = *(f0 + idx*9+ 8) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) - (1./6.)*c;
		}
// Compile-time alternative: fixed-velocity (Zou/He velocity) boundaries.
// NOTE(review): if VELOCITY_BCS_EW is ever defined, this branch redeclares
// idx and c (already declared above in the same scope) and would not
// compile as written; `a`, ux_in and ux_out are partly unused. Dead code
// preserved verbatim.
# if VELOCITY_BCS_EW
		int idx=threadIdx.x + blockIdx.x * blockDim.x, a ;
		double ux_in, ux_out, c, rho;
		if(idx%param->LX==0) //WEST
		{
			ux_in=param->ux_in;
			// Density implied by prescribed inlet x-velocity ux_in.
			rho = ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 3) + *(f0 + idx*9 + 7) + *(f0 + idx*9 +6) ) )/(1.-ux_in);
			c = ux_in*rho;
			*(f0 + idx*9 + 1) = *(f0 + idx*9+ 3) + (2./3.)*c;
			*(f0 + idx*9 + 5) = *(f0 + idx*9+ 7) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) + (1./6.)*c;
			*(f0 + idx*9 + 8) = *(f0 + idx*9+ 6) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) + (1./6.)*c;
		}
		if( (idx+1)%param->LX==0) //EAST
		{
			ux_out=param->ux_out;
			// Density implied by prescribed outlet x-velocity ux_out.
			rho = ( *(f0 + idx*9 + 0) + *(f0 + idx*9 + 2) + *(f0 + idx*9 +4) + 2.*( *(f0 + idx*9 + 1) + *(f0 + idx*9 + 5) + *(f0 + idx*9 +8) ) )/(1.+ux_out);
			c = ux_out*rho;
			*(f0 + idx*9 + 3) = *(f0 + idx*9+ 1) - (2./3.)*c;
			*(f0 + idx*9 + 7) = *(f0 + idx*9+ 5) + (1./2.)*( *(f0 + idx*9 + 2) - *(f0 + idx*9 + 4)) - (1./6.)*c;
			*(f0 + idx*9 + 6) = *(f0 + idx*9+ 8) + (1./2.)*( *(f0 + idx*9 + 4) - *(f0 + idx*9 + 2)) - (1./6.)*c;
		}
#endif
	}
}
804f9bce3c7c781fff289ef5a98ca5e7fc7f2bb5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "cuda_utils.h"
#include "timer.c"

typedef float dtype;

/* Naive out-of-place transpose of an N x N matrix: AT = A^T.
 * 2D launch, one element per thread. Reads of A are coalesced
 * (col = threadIdx.x varies fastest within a warp); writes to AT are
 * strided by N and therefore uncoalesced. */
__global__ void matTrans(dtype* AT, dtype* A, int N)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (col < N && row < N)
		AT[col * N + row] = A[col + row * N];
}

/* Parse the single command-line argument: matrix dimension N (> 0). */
void parseArg (int argc, char** argv, int* N)
{
	if(argc == 2) {
		*N = atoi (argv[1]);
		assert (*N > 0);
	} else {
		fprintf (stderr, "usage: %s <N>\n", argv[0]);
		exit (EXIT_FAILURE);
	}
}

/* Fill in[0..N-1] with uniform random values in [0, 1]. */
void initArr (dtype* in, int N)
{
	int i;
	for(i = 0; i < N; i++) {
		in[i] = (dtype) rand () / RAND_MAX;
	}
}

/* Reference CPU transpose: AT = A^T for an N x N row-major matrix. */
void cpuTranspose (dtype* A, dtype* AT, int N)
{
	int i, j;
	for(i = 0; i < N; i++) {
		for(j = 0; j < N; j++) {
			AT[j * N + i] = A[i * N + j];
		}
	}
}

/* Count elements of a and b that differ by more than 1e-6.
 * Fix: use fabs() -- the original applied integer abs() to a float
 * difference, truncating it toward zero and hiding small mismatches. */
int cmpArr (dtype* a, dtype* b, int N)
{
	int cnt, i;
	cnt = 0;
	for(i = 0; i < N; i++) {
		if(fabs (a[i] - b[i]) > 1e-6) cnt++;
	}
	return cnt;
}

/* Transpose A (N x N, host) into AT (host) on the GPU and report the
 * measured throughput. The timed region includes allocation and copies. */
void gpuTranspose (dtype* A, dtype* AT, int N)
{
	struct stopwatch_t* timer = NULL;
	long double t_gpu;

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);

	/* run your kernel here */
	dtype *d_idata, *d_odata;
	/* Fix: the original CUDA_CHECK_ERROR(...) invocations were missing
	 * their closing parenthesis and could not compile. */
	CUDA_CHECK_ERROR(hipMalloc (&d_idata, N*N*sizeof(dtype)));
	CUDA_CHECK_ERROR(hipMalloc (&d_odata, N*N*sizeof(dtype)));
	CUDA_CHECK_ERROR(hipMemcpy (d_idata, A, N*N*sizeof(dtype), hipMemcpyHostToDevice));

	/* K (tile width) comes from cuda_utils.h; N/K+1 blocks per axis cover
	 * any N, with the kernel's bounds check discarding the overhang. */
	dim3 grid(N/K+1, N/K+1, 1);
	dim3 block(K, K, 1);
	hipLaunchKernelGGL(( matTrans), dim3(grid), dim3(block), 0, 0, d_odata, d_idata, N);

	CUDA_CHECK_ERROR(hipMemcpy (AT, d_odata, N*N*sizeof(dtype), hipMemcpyDeviceToHost));
	hipDeviceSynchronize ();

	t_gpu = stopwatch_stop (timer);
	fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 );

	/* Fix: release device buffers (the original leaked both). */
	CUDA_CHECK_ERROR(hipFree (d_idata));
	CUDA_CHECK_ERROR(hipFree (d_odata));
}

int main(int argc, char** argv)
{
	/* variables */
	dtype *A, *ATgpu, *ATcpu;
	int err;
	int N;
	struct stopwatch_t* timer = NULL;
	long double t_cpu;

	N = -1;
	parseArg (argc, argv, &N);

	/* input and output matrices on host */
	/* output */
	ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
	ATgpu = (dtype*) malloc (N * N * sizeof (dtype));
	/* input */
	A = (dtype*) malloc (N * N * sizeof (dtype));

	initArr (A, N * N);

	/* GPU transpose kernel */
	gpuTranspose (A, ATgpu, N);

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();
	stopwatch_start (timer);

	/* compute reference array */
	cpuTranspose (A, ATcpu, N);
	t_cpu = stopwatch_stop (timer);
	fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu);

	/* check correctness */
	err = cmpArr (ATgpu, ATcpu, N * N);
	if(err) {
		fprintf (stderr, "Transpose failed: %d\n", err);
	} else {
		fprintf (stderr, "Transpose successful\n");
	}

	free (A);
	free (ATgpu);
	free (ATcpu);

	return 0;
}
804f9bce3c7c781fff289ef5a98ca5e7fc7f2bb5.cu
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "cuda_utils.h"
#include "timer.c"

typedef float dtype;

/* Naive out-of-place transpose of an N x N matrix: AT = A^T.
 * 2D launch, one element per thread. Reads of A are coalesced
 * (col = threadIdx.x varies fastest within a warp); writes to AT are
 * strided by N and therefore uncoalesced. */
__global__ void matTrans(dtype* AT, dtype* A, int N)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (col < N && row < N)
		AT[col * N + row] = A[col + row * N];
}

/* Parse the single command-line argument: matrix dimension N (> 0). */
void parseArg (int argc, char** argv, int* N)
{
	if(argc == 2) {
		*N = atoi (argv[1]);
		assert (*N > 0);
	} else {
		fprintf (stderr, "usage: %s <N>\n", argv[0]);
		exit (EXIT_FAILURE);
	}
}

/* Fill in[0..N-1] with uniform random values in [0, 1]. */
void initArr (dtype* in, int N)
{
	int i;
	for(i = 0; i < N; i++) {
		in[i] = (dtype) rand () / RAND_MAX;
	}
}

/* Reference CPU transpose: AT = A^T for an N x N row-major matrix. */
void cpuTranspose (dtype* A, dtype* AT, int N)
{
	int i, j;
	for(i = 0; i < N; i++) {
		for(j = 0; j < N; j++) {
			AT[j * N + i] = A[i * N + j];
		}
	}
}

/* Count elements of a and b that differ by more than 1e-6.
 * Fix: use fabs() -- the original applied integer abs() to a float
 * difference, truncating it toward zero and hiding small mismatches. */
int cmpArr (dtype* a, dtype* b, int N)
{
	int cnt, i;
	cnt = 0;
	for(i = 0; i < N; i++) {
		if(fabs (a[i] - b[i]) > 1e-6) cnt++;
	}
	return cnt;
}

/* Transpose A (N x N, host) into AT (host) on the GPU and report the
 * measured throughput. The timed region includes allocation and copies. */
void gpuTranspose (dtype* A, dtype* AT, int N)
{
	struct stopwatch_t* timer = NULL;
	long double t_gpu;

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);

	/* run your kernel here */
	dtype *d_idata, *d_odata;
	/* Fix: the original CUDA_CHECK_ERROR(...) invocations were missing
	 * their closing parenthesis and could not compile. */
	CUDA_CHECK_ERROR(cudaMalloc (&d_idata, N*N*sizeof(dtype)));
	CUDA_CHECK_ERROR(cudaMalloc (&d_odata, N*N*sizeof(dtype)));
	CUDA_CHECK_ERROR(cudaMemcpy (d_idata, A, N*N*sizeof(dtype), cudaMemcpyHostToDevice));

	/* K (tile width) comes from cuda_utils.h; N/K+1 blocks per axis cover
	 * any N, with the kernel's bounds check discarding the overhang. */
	dim3 grid(N/K+1, N/K+1, 1);
	dim3 block(K, K, 1);
	matTrans <<<grid, block>>> (d_odata, d_idata, N);

	CUDA_CHECK_ERROR(cudaMemcpy (AT, d_odata, N*N*sizeof(dtype), cudaMemcpyDeviceToHost));
	/* Fix: cudaThreadSynchronize() is deprecated; use the drop-in
	 * replacement cudaDeviceSynchronize(). */
	cudaDeviceSynchronize ();

	t_gpu = stopwatch_stop (timer);
	fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 );

	/* Fix: release device buffers (the original leaked both). */
	CUDA_CHECK_ERROR(cudaFree (d_idata));
	CUDA_CHECK_ERROR(cudaFree (d_odata));
}

int main(int argc, char** argv)
{
	/* variables */
	dtype *A, *ATgpu, *ATcpu;
	int err;
	int N;
	struct stopwatch_t* timer = NULL;
	long double t_cpu;

	N = -1;
	parseArg (argc, argv, &N);

	/* input and output matrices on host */
	/* output */
	ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
	ATgpu = (dtype*) malloc (N * N * sizeof (dtype));
	/* input */
	A = (dtype*) malloc (N * N * sizeof (dtype));

	initArr (A, N * N);

	/* GPU transpose kernel */
	gpuTranspose (A, ATgpu, N);

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();
	stopwatch_start (timer);

	/* compute reference array */
	cpuTranspose (A, ATcpu, N);
	t_cpu = stopwatch_stop (timer);
	fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n", t_cpu);

	/* check correctness */
	err = cmpArr (ATgpu, ATcpu, N * N);
	if(err) {
		fprintf (stderr, "Transpose failed: %d\n", err);
	} else {
		fprintf (stderr, "Transpose successful\n");
	}

	free (A);
	free (ATgpu);
	free (ATcpu);

	return 0;
}
b140f8e124e4c7c132971415c78d2e72ac798ee3.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2015 Kai Zhang (kay21s@gmail.com)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

// Benchmark driver for a GPU hash table backed by a GPM-mapped file
// ("./imkv.out"): repeatedly bulk-inserts batches of random elements, then
// verifies each batch with a GPU search pass, printing throughput stats.
// HIP port (hipified) of the CUDA original.

#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include "gpu_hash.h"
#include "libgpm.cuh"
#include "bandwidth_analysis.cuh"

//GTX 480 has 14 SM, and M2090 has 16 SM
//#define INSERT_BLOCK 16 defined in gpu_hash.h
// Hash buckets per insert block, and elements per insert block.
// BUC_NUM / HT_SIZE / sign_t / loc_t etc. come from gpu_hash.h.
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)

// Accumulated timing buckets filled in by gpu_hash_insert (microsecond-scale;
// printed as ms at the end).
double persist_time = 0, operation_time = 0, ddio_time = 0;
//#define KERNEL 1

int main(int argc, char *argv[])
{
	ddio_on();
	int SELEM_NUM, THREAD_NUM;
	// Element count and search thread count from argv, with defaults.
	if (argc != 3) {
		SELEM_NUM = 16384 * 128;
		THREAD_NUM = 16384 * 2;
		printf("usage: ./run #elem_num #thread_num, now running with %d\n", THREAD_NUM);
	} else {
		SELEM_NUM = atoi(argv[1]);
		THREAD_NUM = atoi(argv[2]);
	}
	printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);

	uint8_t *device_hash_table;
	uint8_t *device_in;          // device copy of the insert batch
	uint8_t *host_in;            // pinned host staging buffer for inserts

	// Per-insert-block slices of device_in and their element counts
	// (host-side mirrors, copied to the device arrays below).
	ielem_t *blk_input_h[INSERT_BLOCK];
	int blk_elem_num_h[INSERT_BLOCK];
	ielem_t **blk_input_d;
	int *blk_elem_num_d;

	double diff;
	int i;
	struct timespec start, end;
#if defined(KERNEL)
	struct timespec kernel_start;
#endif
	uint8_t *device_search_in;
	uint8_t *device_search_out;
	uint8_t *host_search_in;
	uint8_t *host_search_out;
	uint8_t *host_search_verify;  // expected loc per element, for verification

	//CUDA_SAFE_CALL(hipMalloc((void **)&(device_hash_table), HT_SIZE));
	// Hash table lives in a GPM-mapped file rather than plain device memory.
	size_t file_size = HT_SIZE;
	device_hash_table = (uint8_t*)gpm_map_file("./imkv.out", file_size, 1);
	CUDA_SAFE_CALL(hipMemset((void *)device_hash_table, 0, HT_SIZE));

	CUDA_SAFE_CALL(hipMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
	CUDA_SAFE_CALL(hipMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
	CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), hipHostMallocDefault));

	CUDA_SAFE_CALL(hipMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
	CUDA_SAFE_CALL(hipMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));

	// Partition device_in into INSERT_BLOCK equal contiguous slices.
	for (i = 0; i < INSERT_BLOCK; i ++) {
		blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
		blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
	}

	CUDA_SAFE_CALL(hipMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), hipMemcpyHostToDevice));

	// for search
	CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
	CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), hipHostMallocDefault));
	// Search returns two candidate locations per element (2x output).
	CUDA_SAFE_CALL(hipMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
	CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
	CUDA_SAFE_CALL(hipHostMalloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), hipHostMallocDefault));
	//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));

	// start
	CUDA_SAFE_CALL(hipDeviceSynchronize());

	int has, lower_bond;
	srand(time(NULL));

	double ins_time = 0;
	double del_time = 0;
	int num_inserts = 25;
	has = 0;
	// Each round inserts SELEM_NUM fresh random elements, raising the load
	// factor, then verifies them via search.
	for (has = 0; has < num_inserts/*has < 0.1 * HT_SIZE/(sizeof(sign_t) + sizeof(loc_t))*/; has++) {
		printf("%d : Load factor: %f, exisiting number : %d.\n", has,
			(double)has*SELEM_NUM/(HT_SIZE/(sizeof(sign_t)+sizeof(loc_t))), has*SELEM_NUM);

		/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
		// Generate random (sig, hash, loc) triples; the hash is confined to
		// the bucket range owned by the element's insert block so each block
		// only touches its own buckets. Search inputs mirror insert inputs.
		for (i = 0; i < SELEM_NUM; i += 1) {
			lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
			// sig
			((selem_t *)host_search_in)[i].sig
				= ((ielem_t *)host_in)[i].sig = rand();
			// hash
			((selem_t *)host_search_in)[i].hash
				= ((ielem_t *)host_in)[i].hash
				= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
			// loc
			((loc_t *)host_search_verify)[i]
				= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
			//printf("%d\n", ((int *)host_search_verify)[i]);
		}
		//for debugging
		// Sanity-check that every hash falls inside its block's bucket range.
		for (i = 0; i < SELEM_NUM; i += 1) {
			//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
			//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
			assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
			assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
		}

		// Timed region includes the H2D copy of the batch plus the insert.
		clock_gettime(CLOCK_MONOTONIC, &start);
		CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), hipMemcpyHostToDevice));
		hipDeviceSynchronize();
		//START_BW_MONITOR2("bw_gpm_kvs.csv");
		gpu_hash_insert((bucket_t *)device_hash_table,
			(ielem_t **)blk_input_d,
			(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, 0,
			operation_time, ddio_time, persist_time);
		CUDA_SAFE_CALL(hipDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &end);
		//STOP_BW_MONITOR
		OUTPUT_STATS

		// diff is in microseconds.
		diff = 1000000 * (end.tv_sec-start.tv_sec) + (double)(end.tv_nsec-start.tv_nsec)/1000;
		printf("With Memcpy, the difference is %.2lf us, speed is %.2f Mops\n",
			(double)diff, (double)(SELEM_NUM) / diff);
		ins_time += diff/ 1000.0f;

#if 1
		/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
		// verify with search
		CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in, SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
		CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
		//for(int iters = 0; iters < 10; ++iters) {
		//auto search_start = std::chrono::high_resolution_clock::now();
		gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
				(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
		hipDeviceSynchronize();
		//search_time += (double)std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - search_start).count() / 1000.0;
		//}
		CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out, 2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));

		// Element i is found if either of its two candidate output slots
		// matches the loc recorded at insert time.
		for (i = 0; i < SELEM_NUM; i ++) {
			if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
					&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
				printf("not found insertion %d : out %lx and %lx, should be : %lx\n",
					i, ((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
					((loc_t *)host_search_verify)[i]);
				/* for debugging
				((int *)host_in)[0] = ((int *)host_in)[i*3];
				((int *)host_in)[1] = ((int *)host_in)[i*3+1];
				((int *)host_in)[2] = ((int *)host_in)[i*3+2];
				CUDA_SAFE_CALL(hipMemcpy(device_in, host_in, sizeof(ielem_t), hipMemcpyHostToDevice));
				CUDA_SAFE_CALL(hipMemset((void *)device_out, 0, SELEM_NUM * sizeof(loc_t)));
				gpu_hash_insert((bucket_t *)device_hash_table,
					(ielem_t **)blk_input_d,
					(loc_t **)blk_output_d,
					(int *)blk_elem_num_d, INSERT_BLOCK, 0);
				*/
			}
		}
#endif

#ifdef RESTORE_FLAG
		/* +++++++++++++++++++++++++++++++++++ DELETE +++++++++++++++++++++++++++++++++ */
		// Recovery path: roll the table back, then confirm (via search) that
		// none of this round's insertions are still visible.
		CUDA_SAFE_CALL(hipDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &start);
		recover_insert((bucket_t *)device_hash_table);
		CUDA_SAFE_CALL(hipDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &end);
		diff = 1000000 * (end.tv_sec-start.tv_sec) + (double)(end.tv_nsec-start.tv_nsec)/1000;
		printf("DELETE, the difference is %.2lf us, speed is %.2f Mops\n",
			(double)diff, (double)(SELEM_NUM) / diff);
		del_time += diff / 1000.0f;

		// verify with search
		CUDA_SAFE_CALL(hipMemcpy(device_search_in, host_search_in, SELEM_NUM * sizeof(selem_t), hipMemcpyHostToDevice));
		CUDA_SAFE_CALL(hipMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
		gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
				(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
		hipDeviceSynchronize();
		CUDA_SAFE_CALL(hipMemcpy(host_search_out, device_search_out, 2 * SELEM_NUM * sizeof(loc_t), hipMemcpyDeviceToHost));

		// After recovery a hit is an error (element should be gone).
		for (i = 0; i < SELEM_NUM; i ++) {
			if(((loc_t *)host_search_out)[i<<1] == ((loc_t *)host_search_verify)[i]
					|| ((loc_t *)host_search_out)[(i<<1)+1] == ((loc_t *)host_search_verify)[i]) {
				printf("found insertion %d : out %lx and %lx, should be : %lx\n",
					i, ((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
					((loc_t *)host_search_verify)[i]);
				assert(false);
			}
		}
#endif
	}

	printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
	printf("DDIOTime:\t%f\tms\nPersistTime\t%f\n", ddio_time/1000000.0, persist_time/1000000.0);
	printf("Runtime\t%f\tms\n", ins_time);
	printf("Recovery\t%f\tms\n", del_time);
	return 0;
}
b140f8e124e4c7c132971415c78d2e72ac798ee3.cu
/*
 * Copyright (c) 2015 Kai Zhang (kay21s@gmail.com)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

// Benchmark driver for a GPU hash table backed by a GPM-mapped file
// ("./imkv.out"): repeatedly bulk-inserts batches of random elements, then
// verifies each batch with a GPU search pass, printing throughput stats.

#define EMULATE_NVM_BW
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <cuda_runtime.h>
#include <chrono>
#include "gpu_hash.h"
#include "libgpm.cuh"
#include "bandwidth_analysis.cuh"

//GTX 480 has 14 SM, and M2090 has 16 SM
//#define INSERT_BLOCK 16 defined in gpu_hash.h
// Hash buckets per insert block, and elements per insert block.
// BUC_NUM / HT_SIZE / sign_t / loc_t etc. come from gpu_hash.h.
#define HASH_BLOCK_ELEM_NUM (BUC_NUM/INSERT_BLOCK)
#define BLOCK_ELEM_NUM (SELEM_NUM/INSERT_BLOCK)

// Accumulated timing buckets filled in by gpu_hash_insert (microsecond-scale;
// printed as ms at the end).
double persist_time = 0, operation_time = 0, ddio_time = 0;
//#define KERNEL 1

int main(int argc, char *argv[])
{
	ddio_on();
	int SELEM_NUM, THREAD_NUM;
	// Element count and search thread count from argv, with defaults.
	if (argc != 3) {
		SELEM_NUM = 16384 * 128;
		THREAD_NUM = 16384 * 2;
		printf("usage: ./run #elem_num #thread_num, now running with %d\n", THREAD_NUM);
	} else {
		SELEM_NUM = atoi(argv[1]);
		THREAD_NUM = atoi(argv[2]);
	}
	printf("elem_num is %d, thread_num is %d\n", SELEM_NUM, THREAD_NUM);

	uint8_t *device_hash_table;
	uint8_t *device_in;          // device copy of the insert batch
	uint8_t *host_in;            // pinned host staging buffer for inserts

	// Per-insert-block slices of device_in and their element counts
	// (host-side mirrors, copied to the device arrays below).
	ielem_t *blk_input_h[INSERT_BLOCK];
	int blk_elem_num_h[INSERT_BLOCK];
	ielem_t **blk_input_d;
	int *blk_elem_num_d;

	double diff;
	int i;
	struct timespec start, end;
#if defined(KERNEL)
	struct timespec kernel_start;
#endif
	uint8_t *device_search_in;
	uint8_t *device_search_out;
	uint8_t *host_search_in;
	uint8_t *host_search_out;
	uint8_t *host_search_verify;  // expected loc per element, for verification

	//CUDA_SAFE_CALL(cudaMalloc((void **)&(device_hash_table), HT_SIZE));
	// Hash table lives in a GPM-mapped file rather than plain device memory.
	size_t file_size = HT_SIZE;
	device_hash_table = (uint8_t*)gpm_map_file("./imkv.out", file_size, 1);
	CUDA_SAFE_CALL(cudaMemset((void *)device_hash_table, 0, HT_SIZE));

	CUDA_SAFE_CALL(cudaMalloc((void **)&(device_in), SELEM_NUM * sizeof(ielem_t)));
	CUDA_SAFE_CALL(cudaMemset((void *)device_in, 0, SELEM_NUM * sizeof(ielem_t)));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_in), SELEM_NUM * sizeof(ielem_t), cudaHostAllocDefault));

	CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_input_d), INSERT_BLOCK * sizeof(ielem_t *)));
	CUDA_SAFE_CALL(cudaMalloc((void **)&(blk_elem_num_d), INSERT_BLOCK * sizeof(int)));

	// Partition device_in into INSERT_BLOCK equal contiguous slices.
	for (i = 0; i < INSERT_BLOCK; i ++) {
		blk_input_h[i] = &(((ielem_t *)device_in)[i*(SELEM_NUM/INSERT_BLOCK)]);
		blk_elem_num_h[i] = SELEM_NUM/INSERT_BLOCK;
	}

	CUDA_SAFE_CALL(cudaMemcpy(blk_input_d, blk_input_h, INSERT_BLOCK * sizeof(void *), cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(blk_elem_num_d, blk_elem_num_h, INSERT_BLOCK * sizeof(int), cudaMemcpyHostToDevice));

	// for search
	CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_in), SELEM_NUM * sizeof(selem_t)));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_in), SELEM_NUM * sizeof(selem_t), cudaHostAllocDefault));
	// Search returns two candidate locations per element (2x output).
	CUDA_SAFE_CALL(cudaMalloc((void **)&(device_search_out), 2 * SELEM_NUM * sizeof(loc_t)));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_out), 2 * SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
	CUDA_SAFE_CALL(cudaHostAlloc((void **)&(host_search_verify), SELEM_NUM * sizeof(loc_t), cudaHostAllocDefault));
	//host_search_verify = (uint8_t *)malloc(SELEM_NUM * sizeof(loc_t));

	// start
	CUDA_SAFE_CALL(cudaDeviceSynchronize());

	int has, lower_bond;
	srand(time(NULL));

	double ins_time = 0;
	double del_time = 0;
	int num_inserts = 25;
	has = 0;
	// Each round inserts SELEM_NUM fresh random elements, raising the load
	// factor, then verifies them via search.
	for (has = 0; has < num_inserts/*has < 0.1 * HT_SIZE/(sizeof(sign_t) + sizeof(loc_t))*/; has++) {
		printf("%d : Load factor: %f, exisiting number : %d.\n", has,
			(double)has*SELEM_NUM/(HT_SIZE/(sizeof(sign_t)+sizeof(loc_t))), has*SELEM_NUM);

		/* +++++++++++++++++++++++++++++++++++ INSERT +++++++++++++++++++++++++++++++++ */
		// Generate random (sig, hash, loc) triples; the hash is confined to
		// the bucket range owned by the element's insert block so each block
		// only touches its own buckets. Search inputs mirror insert inputs.
		for (i = 0; i < SELEM_NUM; i += 1) {
			lower_bond = (i / BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM;
			// sig
			((selem_t *)host_search_in)[i].sig
				= ((ielem_t *)host_in)[i].sig = rand();
			// hash
			((selem_t *)host_search_in)[i].hash
				= ((ielem_t *)host_in)[i].hash
				= lower_bond + rand() % HASH_BLOCK_ELEM_NUM;
			// loc
			((loc_t *)host_search_verify)[i]
				= ((ielem_t *)host_in)[i].loc = (loc_t)rand();
			//printf("%d\n", ((int *)host_search_verify)[i]);
		}
		//for debugging
		// Sanity-check that every hash falls inside its block's bucket range.
		for (i = 0; i < SELEM_NUM; i += 1) {
			//printf("%d %d %d\n", ((int *)host_in)[i*3], (i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM,
			//(i/BLOCK_ELEM_NUM) * BLOCK_ELEM_NUM + BLOCK_ELEM_NUM);
			assert(((ielem_t *)host_in)[i].hash < (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM + HASH_BLOCK_ELEM_NUM);
			assert(((ielem_t *)host_in)[i].hash >= (i/BLOCK_ELEM_NUM) * HASH_BLOCK_ELEM_NUM);
		}

		// Timed region includes the H2D copy of the batch plus the insert.
		clock_gettime(CLOCK_MONOTONIC, &start);
		CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, SELEM_NUM * sizeof(ielem_t), cudaMemcpyHostToDevice));
		cudaDeviceSynchronize();
		//START_BW_MONITOR2("bw_gpm_kvs.csv");
		gpu_hash_insert((bucket_t *)device_hash_table,
			(ielem_t **)blk_input_d,
			(int *)blk_elem_num_d, INSERT_BLOCK, SELEM_NUM, 0,
			operation_time, ddio_time, persist_time);
		CUDA_SAFE_CALL(cudaDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &end);
		//STOP_BW_MONITOR
		OUTPUT_STATS

		// diff is in microseconds.
		diff = 1000000 * (end.tv_sec-start.tv_sec) + (double)(end.tv_nsec-start.tv_nsec)/1000;
		printf("With Memcpy, the difference is %.2lf us, speed is %.2f Mops\n",
			(double)diff, (double)(SELEM_NUM) / diff);
		ins_time += diff/ 1000.0f;

#if 1
		/* +++++++++++++++++++++++++++++++++++ SEARCH +++++++++++++++++++++++++++++++++ */
		// verify with search
		CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in, SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
		CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
		//for(int iters = 0; iters < 10; ++iters) {
		//auto search_start = std::chrono::high_resolution_clock::now();
		gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
				(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
		cudaDeviceSynchronize();
		//search_time += (double)std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - search_start).count() / 1000.0;
		//}
		CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out, 2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));

		// Element i is found if either of its two candidate output slots
		// matches the loc recorded at insert time.
		for (i = 0; i < SELEM_NUM; i ++) {
			if(((loc_t *)host_search_out)[i<<1] != ((loc_t *)host_search_verify)[i]
					&& ((loc_t *)host_search_out)[(i<<1)+1] != ((loc_t *)host_search_verify)[i]) {
				printf("not found insertion %d : out %lx and %lx, should be : %lx\n",
					i, ((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
					((loc_t *)host_search_verify)[i]);
				/* for debugging
				((int *)host_in)[0] = ((int *)host_in)[i*3];
				((int *)host_in)[1] = ((int *)host_in)[i*3+1];
				((int *)host_in)[2] = ((int *)host_in)[i*3+2];
				CUDA_SAFE_CALL(cudaMemcpy(device_in, host_in, sizeof(ielem_t), cudaMemcpyHostToDevice));
				CUDA_SAFE_CALL(cudaMemset((void *)device_out, 0, SELEM_NUM * sizeof(loc_t)));
				gpu_hash_insert((bucket_t *)device_hash_table,
					(ielem_t **)blk_input_d,
					(loc_t **)blk_output_d,
					(int *)blk_elem_num_d, INSERT_BLOCK, 0);
				*/
			}
		}
#endif

#ifdef RESTORE_FLAG
		/* +++++++++++++++++++++++++++++++++++ DELETE +++++++++++++++++++++++++++++++++ */
		// Recovery path: roll the table back, then confirm (via search) that
		// none of this round's insertions are still visible.
		CUDA_SAFE_CALL(cudaDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &start);
		recover_insert((bucket_t *)device_hash_table);
		CUDA_SAFE_CALL(cudaDeviceSynchronize());
		clock_gettime(CLOCK_MONOTONIC, &end);
		diff = 1000000 * (end.tv_sec-start.tv_sec) + (double)(end.tv_nsec-start.tv_nsec)/1000;
		printf("DELETE, the difference is %.2lf us, speed is %.2f Mops\n",
			(double)diff, (double)(SELEM_NUM) / diff);
		del_time += diff / 1000.0f;

		// verify with search
		CUDA_SAFE_CALL(cudaMemcpy(device_search_in, host_search_in, SELEM_NUM * sizeof(selem_t), cudaMemcpyHostToDevice));
		CUDA_SAFE_CALL(cudaMemset((void *)device_search_out, 0, 2 * SELEM_NUM * sizeof(loc_t)));
		gpu_hash_search((selem_t *)device_search_in, (loc_t *)device_search_out,
				(bucket_t *)device_hash_table, SELEM_NUM, THREAD_NUM, 128, 0);
		cudaDeviceSynchronize();
		CUDA_SAFE_CALL(cudaMemcpy(host_search_out, device_search_out, 2 * SELEM_NUM * sizeof(loc_t), cudaMemcpyDeviceToHost));

		// After recovery a hit is an error (element should be gone).
		for (i = 0; i < SELEM_NUM; i ++) {
			if(((loc_t *)host_search_out)[i<<1] == ((loc_t *)host_search_verify)[i]
					|| ((loc_t *)host_search_out)[(i<<1)+1] == ((loc_t *)host_search_verify)[i]) {
				printf("found insertion %d : out %lx and %lx, should be : %lx\n",
					i, ((loc_t *)host_search_out)[i<<1], ((loc_t *)host_search_out)[(i<<1)+1],
					((loc_t *)host_search_verify)[i]);
				assert(false);
			}
		}
#endif
	}

	printf("\nOperation execution time: %f ms\n", operation_time/1000000.0);
	printf("DDIOTime:\t%f\tms\nPersistTime\t%f\n", ddio_time/1000000.0, persist_time/1000000.0);
	printf("Runtime\t%f\tms\n", ins_time);
	printf("Recovery\t%f\tms\n", del_time);
	return 0;
}
da0667f52667e8d34c96024f568ffe8f72483517.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
 * Copyright 2017-2018 by Contributors
 */
#include <dmlc/parameter.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <memory>

#include "xgboost/data.h"
#include "xgboost/predictor.h"
#include "xgboost/tree_model.h"
#include "xgboost/tree_updater.h"
#include "xgboost/host_device_vector.h"

#include "../gbm/gbtree_model.h"
#include "../common/common.h"
#include "../common/device_helpers.cuh"

namespace xgboost {
namespace predictor {

DMLC_REGISTRY_FILE_TAG(gpu_predictor);

/**
 * \struct  DevicePredictionNode
 *
 * \brief Packed 16 byte representation of a tree node for use in device
 * prediction
 */
struct DevicePredictionNode {
  XGBOOST_DEVICE DevicePredictionNode()
      : fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {}

  // Leaf nodes store a weight; internal nodes store the split threshold.
  union NodeValue {
    float leaf_weight;
    float fvalue;
  };

  int fidx;             // split feature index; top bit encodes default-left
  int left_child_idx;
  int right_child_idx;
  NodeValue val{};

  // Pack a RegTree node: the "missing goes left" flag is folded into the
  // sign bit of fidx so the whole node fits in 16 bytes.
  DevicePredictionNode(const RegTree::Node& n) {  // NOLINT
    static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes");
    this->left_child_idx = n.LeftChild();
    this->right_child_idx = n.RightChild();
    this->fidx = n.SplitIndex();
    if (n.DefaultLeft()) {
      fidx |= (1U << 31);
    }
    if (n.IsLeaf()) {
      this->val.leaf_weight = n.LeafValue();
    } else {
      this->val.fvalue = n.SplitCond();
    }
  }

  XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; }

  // Strip the default-left flag bit to recover the feature index.
  XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); }

  XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; }

  // Child to follow when the feature value is missing (NaN).
  XGBOOST_DEVICE int MissingIdx() const {
    if (MissingLeft()) {
      return this->left_child_idx;
    } else {
      return this->right_child_idx;
    }
  }

  XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; }
  XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; }
};

// Fetches feature values for one row of a CSR SparsePage batch. When shared
// memory is available each thread caches its row densely in smem (missing
// features as NaN); otherwise lookups fall back to a binary search over the
// row's sorted entries in global memory.
struct ElementLoader {
  bool use_shared;
  common::Span<const size_t> d_row_ptr;
  common::Span<const Entry> d_data;
  int num_features;
  float* smem;
  size_t entry_start;

  __device__ ElementLoader(bool use_shared, common::Span<const size_t> row_ptr,
                           common::Span<const Entry> entry, int num_features,
                           float* smem, int num_rows, size_t entry_start)
      : use_shared(use_shared),
        d_row_ptr(row_ptr),
        d_data(entry),
        num_features(num_features),
        smem(smem),
        entry_start(entry_start) {
    // Copy instances
    if (use_shared) {
      bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
      int shared_elements = blockDim.x * num_features;
      // Pre-fill with NaN so absent features read as "missing".
      dh::BlockFill(smem, shared_elements, nanf(""));
      __syncthreads();
      if (global_idx < num_rows) {
        bst_uint elem_begin = d_row_ptr[global_idx];
        bst_uint elem_end = d_row_ptr[global_idx + 1];
        for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) {
          Entry elem = d_data[elem_idx - entry_start];
          smem[threadIdx.x * num_features + elem.index] = elem.fvalue;
        }
      }
      __syncthreads();
    }
  }

  // Value of feature fidx for row ridx, or NaN when missing.
  __device__ float GetFvalue(int ridx, int fidx) {
    if (use_shared) {
      return smem[threadIdx.x * num_features + fidx];
    } else {
      // Binary search
      auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start);
      auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start);
      common::Span<const Entry>::iterator previous_middle;
      while (end_ptr != begin_ptr) {
        auto middle = begin_ptr + (end_ptr - begin_ptr) / 2;
        // Break out once the midpoint stops moving (range exhausted).
        if (middle == previous_middle) {
          break;
        } else {
          previous_middle = middle;
        }

        if (middle->index == fidx) {
          return middle->fvalue;
        } else if (middle->index < fidx) {
          begin_ptr = middle;
        } else {
          end_ptr = middle;
        }
      }
      // Value is missing
      return nanf("");
    }
  }
};

// Walk one packed tree from the root for row ridx and return the weight of
// the leaf reached; NaN feature values take the node's default branch.
__device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree,
                               ElementLoader* loader) {
  DevicePredictionNode n = tree[0];
  while (!n.IsLeaf()) {
    float fvalue = loader->GetFvalue(ridx, n.GetFidx());
    // Missing value
    if (isnan(fvalue)) {
      n = tree[n.MissingIdx()];
    } else {
      if (fvalue < n.GetFvalue()) {
        n = tree[n.left_child_idx];
      } else {
        n = tree[n.right_child_idx];
      }
    }
  }
  return n.GetWeight();
}

// One thread per row; accumulates leaf weights of trees [tree_begin,
// tree_end) into d_out_predictions. Dynamic shared memory (when use_shared)
// holds BLOCK_THREADS * num_features floats for the row cache. For
// multi-class models each tree adds into its group's output slot.
template <int BLOCK_THREADS>
__global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes,
                              common::Span<float> d_out_predictions,
                              common::Span<size_t> d_tree_segments,
                              common::Span<int> d_tree_group,
                              common::Span<const size_t> d_row_ptr,
                              common::Span<const Entry> d_data,
                              size_t tree_begin, size_t tree_end,
                              size_t num_features, size_t num_rows,
                              size_t entry_start, bool use_shared,
                              int num_group) {
  extern __shared__ float smem[];
  bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x;
  // Constructed by all threads (it contains __syncthreads) before the
  // out-of-range guard below.
  ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem,
                       num_rows, entry_start);
  if (global_idx >= num_rows) return;
  if (num_group == 1) {
    float sum = 0;
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      sum += GetLeafWeight(global_idx, d_tree, &loader);
    }
    d_out_predictions[global_idx] += sum;
  } else {
    for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      int tree_group = d_tree_group[tree_idx];
      const DevicePredictionNode* d_tree =
          &d_nodes[d_tree_segments[tree_idx - tree_begin]];
      bst_uint out_prediction_idx = global_idx * num_group + tree_group;
      d_out_predictions[out_prediction_idx] +=
          GetLeafWeight(global_idx, d_tree, &loader);
    }
  }
}

// GPU implementation of xgboost's Predictor interface: packs the model's
// trees into DevicePredictionNode arrays on the device and runs
// PredictKernel batch by batch.
class GPUPredictor : public xgboost::Predictor {
 private:
  // Upload pre-flattened node/segment arrays and remember the tree window.
  void InitModel(const gbm::GBTreeModel& model,
                 const thrust::host_vector<size_t>& h_tree_segments,
                 const thrust::host_vector<DevicePredictionNode>& h_nodes,
                 size_t tree_begin, size_t tree_end) {
    dh::safe_cuda(hipSetDevice(device_));
    nodes_.resize(h_nodes.size());
    dh::safe_cuda(hipMemcpyAsync(nodes_.data().get(), h_nodes.data(),
                                 sizeof(DevicePredictionNode) * h_nodes.size(),
                                 hipMemcpyHostToDevice));
    tree_segments_.resize(h_tree_segments.size());
    dh::safe_cuda(hipMemcpyAsync(tree_segments_.data().get(),
                                 h_tree_segments.data(),
                                 sizeof(size_t) * h_tree_segments.size(),
                                 hipMemcpyHostToDevice));
    tree_group_.resize(model.tree_info.size());
    dh::safe_cuda(hipMemcpyAsync(tree_group_.data().get(),
                                 model.tree_info.data(),
                                 sizeof(int) * model.tree_info.size(),
                                 hipMemcpyHostToDevice));
    this->tree_begin_ = tree_begin;
    this->tree_end_ = tree_end;
    this->num_group_ = model.param.num_output_group;
  }

  // Launch PredictKernel for one SparsePage batch, writing into the
  // predictions span starting at batch_offset. Falls back to global-memory
  // lookups when the shared-memory row cache would exceed the device limit.
  void PredictInternal(const SparsePage& batch, size_t num_features,
                       HostDeviceVector<bst_float>* predictions,
                       size_t batch_offset) {
    dh::safe_cuda(hipSetDevice(device_));
    const int BLOCK_THREADS = 128;
    size_t num_rows = batch.Size();
    const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS));

    int shared_memory_bytes = static_cast<int>
                              (sizeof(float) * num_features * BLOCK_THREADS);
    bool use_shared = true;
    if (shared_memory_bytes > max_shared_memory_bytes_) {
      shared_memory_bytes = 0;
      use_shared = false;
    }
    size_t entry_start = 0;

    hipLaunchKernelGGL(( PredictKernel<BLOCK_THREADS>), dim3(GRID_SIZE), dim3(BLOCK_THREADS), shared_memory_bytes, 0,
        dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset),
        dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_),
        batch.offset.DeviceSpan(), batch.data.DeviceSpan(), this->tree_begin_,
        this->tree_end_, num_features, num_rows, entry_start, use_shared,
        this->num_group_);
  }

  // Flatten trees [tree_begin, tree_end) into contiguous host arrays
  // (segments hold prefix sums of node counts) and upload them.
  void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) {
    CHECK_EQ(model.param.size_leaf_vector, 0);
    // Copy decision trees to device
    thrust::host_vector<size_t> h_tree_segments{};
    h_tree_segments.reserve((tree_end - tree_begin) + 1);
    size_t sum = 0;
    h_tree_segments.push_back(sum);
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      sum += model.trees.at(tree_idx)->GetNodes().size();
      h_tree_segments.push_back(sum);
    }
    thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back());
    for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) {
      auto& src_nodes = model.trees.at(tree_idx)->GetNodes();
      std::copy(src_nodes.begin(), src_nodes.end(),
                h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]);
    }
    InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end);
  }

  // Predict over every SparsePage batch of dmat, accumulating into
  // out_preds (which must already hold the base margin).
  void DevicePredictInternal(DMatrix* dmat,
                             HostDeviceVector<bst_float>* out_preds,
                             const gbm::GBTreeModel& model, size_t tree_begin,
                             size_t tree_end) {
    if (tree_end - tree_begin == 0) { return; }
    monitor_.StartCuda("DevicePredictInternal");

    InitModel(model, tree_begin, tree_end);

    size_t batch_offset = 0;
    for (auto &batch : dmat->GetBatches<SparsePage>()) {
      batch.offset.SetDevice(device_);
      batch.data.SetDevice(device_);
      PredictInternal(batch, model.param.num_feature,
                      out_preds, batch_offset);
      batch_offset += batch.Size() * model.param.num_output_group;
    }

    monitor_.StopCuda("DevicePredictInternal");
  }

 public:
  GPUPredictor()                                               // NOLINT
      : device_{-1} {}

  ~GPUPredictor() override {
    if (device_ >= 0) {
      dh::safe_cuda(hipSetDevice(device_));
    }
  }

  // Predict for a whole DMatrix; serves from the prediction cache when the
  // requested tree limit covers the cached result.
  void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                    const gbm::GBTreeModel& model, int tree_begin,
                    unsigned ntree_limit = 0) override {
    int device = learner_param_->gpu_id;
    CHECK_GE(device, 0);
    ConfigureDevice(device);

    if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) {
      return;
    }
    this->InitOutPredictions(dmat->Info(), out_preds, model);

    int tree_end = ntree_limit * model.param.num_output_group;

    // ntree_limit == 0 means "use all trees".
    if (ntree_limit == 0 || ntree_limit > model.trees.size()) {
      tree_end = static_cast<unsigned>(model.trees.size());
    }

    DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end);
  }

 protected:
  // Size out_preds and seed it with the base margin (per-row margins when
  // provided, otherwise the model's scalar base margin).
  void InitOutPredictions(const MetaInfo& info,
                          HostDeviceVector<bst_float>* out_preds,
                          const gbm::GBTreeModel& model) const {
    size_t n_classes = model.param.num_output_group;
    size_t n = n_classes * info.num_row_;
    const HostDeviceVector<bst_float>& base_margin = info.base_margin_;
    out_preds->SetDevice(device_);
    out_preds->Resize(n);
    if (base_margin.Size() != 0) {
      CHECK_EQ(base_margin.Size(), n);
      out_preds->Copy(base_margin);
    } else {
      out_preds->Fill(model.base_margin);
    }
  }

  // Copy a cached full-model prediction into out_preds when available;
  // returns false when the cache cannot satisfy the request.
  bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds,
                        const gbm::GBTreeModel& model, unsigned ntree_limit) {
    if (ntree_limit == 0 ||
        ntree_limit * model.param.num_output_group >= model.trees.size()) {
      auto it = cache_.find(dmat);
      if (it != cache_.end()) {
        const HostDeviceVector<bst_float>& y = it->second.predictions;
        if (y.Size() != 0) {
          monitor_.StartCuda("PredictFromCache");
          out_preds->SetDevice(y.DeviceIdx());
          out_preds->Resize(y.Size());
          out_preds->Copy(y);
          monitor_.StopCuda("PredictFromCache");
          return true;
        }
      }
    }
    return false;
  }

  // Refresh cached predictions after boosting: reuse the updater's
  // incremental cache when possible, otherwise re-predict the new trees.
  void UpdatePredictionCache(
      const gbm::GBTreeModel& model,
      std::vector<std::unique_ptr<TreeUpdater>>* updaters,
      int num_new_trees) override {
    auto old_ntree = model.trees.size() - num_new_trees;
    // update cache entry
    for (auto& kv : cache_) {
      PredictionCacheEntry& e = kv.second;
      DMatrix* dmat = kv.first;
      HostDeviceVector<bst_float>& predictions = e.predictions;

      if (predictions.Size() == 0) {
        this->InitOutPredictions(dmat->Info(), &predictions, model);
      }

      if (model.param.num_output_group == 1 && updaters->size() > 0 &&
          num_new_trees == 1 &&
          updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) {
        // do nothing
      } else {
        DevicePredictInternal(dmat, &predictions, model, old_ntree,
                              model.trees.size());
      }
    }
  }

  // The remaining Predictor entry points are not supported on GPU.
  void PredictInstance(const SparsePage::Inst& inst,
                       std::vector<bst_float>* out_preds,
                       const gbm::GBTreeModel& model, unsigned ntree_limit,
                       unsigned root_index) override {
    LOG(FATAL) << "Internal error: " << __func__
               << " is not implemented in GPU Predictor.";
  }

  void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds,
                   const gbm::GBTreeModel& model,
                   unsigned ntree_limit) override {
    LOG(FATAL) << "Internal error: " << __func__
               << " is not implemented in GPU Predictor.";
  }

  void PredictContribution(DMatrix* p_fmat,
                           std::vector<bst_float>* out_contribs,
                           const gbm::GBTreeModel& model, unsigned ntree_limit,
                           bool approximate, int condition,
                           unsigned condition_feature) override {
    LOG(FATAL) << "Internal error: " << __func__
               << " is not implemented in GPU Predictor.";
  }

  void PredictInteractionContributions(DMatrix* p_fmat,
                                       std::vector<bst_float>* out_contribs,
                                       const gbm::GBTreeModel& model,
                                       unsigned ntree_limit,
                                       bool approximate) override {
    LOG(FATAL) << "Internal error: " << __func__
               << " is not implemented in GPU Predictor.";
  }

  void Configure(const std::vector<std::pair<std::string, std::string>>& cfg,
                 const std::vector<std::shared_ptr<DMatrix>>& cache) override {
    Predictor::Configure(cfg, cache);

    int device = learner_param_->gpu_id;
    if (device >= 0) {
      ConfigureDevice(device);
    }
  }

 private:
  /*! \brief Reconfigure the device when GPU is changed. */
  void ConfigureDevice(int device) {
    if (device_ == device) return;
    device_ = device;
    if (device_ >= 0) {
      max_shared_memory_bytes_ = dh::MaxSharedMemory(device_);
    }
  }

  int device_;                 // active CUDA/HIP device; -1 = unconfigured
  common::Monitor monitor_;
  dh::device_vector<DevicePredictionNode> nodes_;      // flattened trees
  dh::device_vector<size_t> tree_segments_;            // per-tree node offsets
  dh::device_vector<int> tree_group_;                  // output group per tree
  size_t max_shared_memory_bytes_;
  size_t tree_begin_;
  size_t tree_end_;
  int num_group_;
};

XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor")
    .describe("Make predictions using GPU.")
    .set_body([]() { return new GPUPredictor(); });

}  // namespace predictor
}  // namespace xgboost
da0667f52667e8d34c96024f568ffe8f72483517.cu
/*! * Copyright 2017-2018 by Contributors */ #include <dmlc/parameter.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <memory> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); /** * \struct DevicePredictionNode * * \brief Packed 16 byte representation of a tree node for use in device * prediction */ struct DevicePredictionNode { XGBOOST_DEVICE DevicePredictionNode() : fidx{-1}, left_child_idx{-1}, right_child_idx{-1} {} union NodeValue { float leaf_weight; float fvalue; }; int fidx; int left_child_idx; int right_child_idx; NodeValue val{}; DevicePredictionNode(const RegTree::Node& n) { // NOLINT static_assert(sizeof(DevicePredictionNode) == 16, "Size is not 16 bytes"); this->left_child_idx = n.LeftChild(); this->right_child_idx = n.RightChild(); this->fidx = n.SplitIndex(); if (n.DefaultLeft()) { fidx |= (1U << 31); } if (n.IsLeaf()) { this->val.leaf_weight = n.LeafValue(); } else { this->val.fvalue = n.SplitCond(); } } XGBOOST_DEVICE bool IsLeaf() const { return left_child_idx == -1; } XGBOOST_DEVICE int GetFidx() const { return fidx & ((1U << 31) - 1U); } XGBOOST_DEVICE bool MissingLeft() const { return (fidx >> 31) != 0; } XGBOOST_DEVICE int MissingIdx() const { if (MissingLeft()) { return this->left_child_idx; } else { return this->right_child_idx; } } XGBOOST_DEVICE float GetFvalue() const { return val.fvalue; } XGBOOST_DEVICE float GetWeight() const { return val.leaf_weight; } }; struct ElementLoader { bool use_shared; common::Span<const size_t> d_row_ptr; common::Span<const Entry> d_data; int num_features; float* smem; size_t entry_start; __device__ ElementLoader(bool use_shared, 
common::Span<const size_t> row_ptr, common::Span<const Entry> entry, int num_features, float* smem, int num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(row_ptr), d_data(entry), num_features(num_features), smem(smem), entry_start(entry_start) { // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; __device__ float GetLeafWeight(bst_uint ridx, const DevicePredictionNode* tree, ElementLoader* loader) { DevicePredictionNode n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.GetFidx()); // Missing value if (isnan(fvalue)) { n = tree[n.MissingIdx()]; } else { if (fvalue < n.GetFvalue()) { n = tree[n.left_child_idx]; } else { n = tree[n.right_child_idx]; } } } return n.GetWeight(); } template <int BLOCK_THREADS> __global__ void PredictKernel(common::Span<const DevicePredictionNode> d_nodes, 
common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, common::Span<const size_t> d_row_ptr, common::Span<const Entry> d_data, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { extern __shared__ float smem[]; bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; ElementLoader loader(use_shared, d_row_ptr, d_data, num_features, smem, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; sum += GetLeafWeight(global_idx, d_tree, &loader); } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const DevicePredictionNode* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class GPUPredictor : public xgboost::Predictor { private: void InitModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<DevicePredictionNode>& h_nodes, size_t tree_begin, size_t tree_end) { dh::safe_cuda(cudaSetDevice(device_)); nodes_.resize(h_nodes.size()); dh::safe_cuda(cudaMemcpyAsync(nodes_.data().get(), h_nodes.data(), sizeof(DevicePredictionNode) * h_nodes.size(), cudaMemcpyHostToDevice)); tree_segments_.resize(h_tree_segments.size()); dh::safe_cuda(cudaMemcpyAsync(tree_segments_.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), cudaMemcpyHostToDevice)); tree_group_.resize(model.tree_info.size()); dh::safe_cuda(cudaMemcpyAsync(tree_group_.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), 
cudaMemcpyHostToDevice)); this->tree_begin_ = tree_begin; this->tree_end_ = tree_end; this->num_group_ = model.param.num_output_group; } void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { dh::safe_cuda(cudaSetDevice(device_)); const int BLOCK_THREADS = 128; size_t num_rows = batch.Size(); const int GRID_SIZE = static_cast<int>(common::DivRoundUp(num_rows, BLOCK_THREADS)); int shared_memory_bytes = static_cast<int> (sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; PredictKernel<BLOCK_THREADS><<<GRID_SIZE, BLOCK_THREADS, shared_memory_bytes>>> (dh::ToSpan(nodes_), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(tree_segments_), dh::ToSpan(tree_group_), batch.offset.DeviceSpan(), batch.data.DeviceSpan(), this->tree_begin_, this->tree_end_, num_features, num_rows, entry_start, use_shared, this->num_group_); } void InitModel(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<DevicePredictionNode> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } InitModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& 
model, size_t tree_begin, size_t tree_end) { if (tree_end - tree_begin == 0) { return; } monitor_.StartCuda("DevicePredictInternal"); InitModel(model, tree_begin, tree_end); size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { batch.offset.SetDevice(device_); batch.data.SetDevice(device_); PredictInternal(batch, model.param.num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.param.num_output_group; } monitor_.StopCuda("DevicePredictInternal"); } public: GPUPredictor() : device_{-1} {} ~GPUPredictor() override { if (device_ >= 0) { dh::safe_cuda(cudaSetDevice(device_)); } } void PredictBatch(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { int device = learner_param_->gpu_id; CHECK_GE(device, 0); ConfigureDevice(device); if (this->PredictFromCache(dmat, out_preds, model, ntree_limit)) { return; } this->InitOutPredictions(dmat->Info(), out_preds, model); int tree_end = ntree_limit * model.param.num_output_group; if (ntree_limit == 0 || ntree_limit > model.trees.size()) { tree_end = static_cast<unsigned>(model.trees.size()); } DevicePredictInternal(dmat, out_preds, model, tree_begin, tree_end); } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.param.num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(device_); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.base_margin); } } bool PredictFromCache(DMatrix* dmat, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) { if (ntree_limit == 0 || ntree_limit * model.param.num_output_group >= model.trees.size()) { auto it = cache_.find(dmat); if (it != 
cache_.end()) { const HostDeviceVector<bst_float>& y = it->second.predictions; if (y.Size() != 0) { monitor_.StartCuda("PredictFromCache"); out_preds->SetDevice(y.DeviceIdx()); out_preds->Resize(y.Size()); out_preds->Copy(y); monitor_.StopCuda("PredictFromCache"); return true; } } } return false; } void UpdatePredictionCache( const gbm::GBTreeModel& model, std::vector<std::unique_ptr<TreeUpdater>>* updaters, int num_new_trees) override { auto old_ntree = model.trees.size() - num_new_trees; // update cache entry for (auto& kv : cache_) { PredictionCacheEntry& e = kv.second; DMatrix* dmat = kv.first; HostDeviceVector<bst_float>& predictions = e.predictions; if (predictions.Size() == 0) { this->InitOutPredictions(dmat->Info(), &predictions, model); } if (model.param.num_output_group == 1 && updaters->size() > 0 && num_new_trees == 1 && updaters->back()->UpdatePredictionCache(e.data.get(), &predictions)) { // do nothing } else { DevicePredictInternal(dmat, &predictions, model, old_ntree, model.trees.size()); } } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit, unsigned root_index) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, bool approximate) override { LOG(FATAL) << 
"Internal error: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg, const std::vector<std::shared_ptr<DMatrix>>& cache) override { Predictor::Configure(cfg, cache); int device = learner_param_->gpu_id; if (device >= 0) { ConfigureDevice(device); } } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device_ == device) return; device_ = device; if (device_ >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device_); } } int device_; common::Monitor monitor_; dh::device_vector<DevicePredictionNode> nodes_; dh::device_vector<size_t> tree_segments_; dh::device_vector<int> tree_group_; size_t max_shared_memory_bytes_; size_t tree_begin_; size_t tree_end_; int num_group_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([]() { return new GPUPredictor(); }); } // namespace predictor } // namespace xgboost
71ba4e92744e9b5ceacc8852f385664ac4585c32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2017 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward #include "NeighborListGPUTree_hip.cuh" #include "hoomd/TextureTools.h" #include "hoomd/extern/hipcub/hipcub.hpp" #define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction) #define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes #define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code /*! \file NeighborListGPUTree.cu \brief Defines GPU kernel code for neighbor list tree traversal on the GPU */ //! Texture for reading particle positions scalar4_tex_t pdata_pos_tex; //! Texture for reading leaf data scalar4_tex_t leaf_xyzf_tex; //! Texture for the diameter / body scalar2_tex_t leaf_db_tex; //! Texture for reading node upper and lower bounds scalar4_tex_t aabb_node_bounds_tex; //! Texture for the head list texture<unsigned int, 1, hipReadModeElementType> head_list_tex; //!< Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit. /*! * \param v unsigned integer with 10 bits set * \returns The integer expanded with two zeros interleaved between bits * http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/ */ __device__ inline unsigned int expandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } //! Assigns the Morton code-type key for each particle on this processor /*! 
* \param d_morton_types Morton code-type keys per particle * \param d_map_tree_pid List to be overwritten with particle ids in ascending order * \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds * \param d_pos Particle positions * \param N Number of local particles * \param nghosts Number of ghost particles * \param box Local simulation box * \param ghost_width Anticipated size of the ghost layer for nonbonded interactions * * \b Implementation * A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then * concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored * compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will * sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid * to track particles after sorting. */ __global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types, unsigned int *d_map_tree_pid, int *d_morton_conditions, const Scalar4 *d_pos, const unsigned int N, const unsigned int nghosts, const BoxDim box, const Scalar3 ghost_width) { // compute the particle index this thread operates on const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per particle if (idx >= N+nghosts) return; // acquire particle data Scalar4 postype = d_pos[idx]; Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const unsigned int type = __scalar_as_int(postype.w); // get position in simulation box uchar3 periodic = box.getPeriodic(); Scalar3 f = box.makeFraction(pos,ghost_width); /* check if the particle is inside the unit cell + ghost layer in all dimensions * this tolerance is small enough that when we multiply by the morton code bin size, we are still in range * we silently ignore ghosts outside of this width, and instead deal with that special case below * where extra 
ghosts are communicated (e.g. for bonded interactions) */ if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) || (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) || (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N) { atomicMax(d_morton_conditions,idx+1); return; } // find the bin each particle belongs in int ib = (int)(f.x * MORTON_CODE_N_BINS); int jb = (int)(f.y * MORTON_CODE_N_BINS); int kb = (int)(f.z * MORTON_CODE_N_BINS); if (!periodic.x) // ghosts exist and may be past layer width { // handle special cases where random ghosts are beyond the expected layer // by just rounding to the nearest edge if (ib < 0) { ib = 0; } else if (ib >= MORTON_CODE_N_BINS) { ib = MORTON_CODE_N_BINS - 1; } } else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero { ib = 0; } // do as for x in y if (!periodic.y) { if (jb < 0) { jb = 0; } else if (jb >= MORTON_CODE_N_BINS) { jb = MORTON_CODE_N_BINS - 1; } } else if (jb == MORTON_CODE_N_BINS) { jb = 0; } // do as for y in z if (!periodic.z) { if (kb < 0) { kb = 0; } else if (kb >= MORTON_CODE_N_BINS) { kb = MORTON_CODE_N_BINS - 1; } } else if (kb == MORTON_CODE_N_BINS) { kb = 0; } // inline call to some bit swizzling arithmetic unsigned int ii = expandBits((unsigned int)ib); unsigned int jj = expandBits((unsigned int)jb); unsigned int kk = expandBits((unsigned int)kb); unsigned int morton_code = ii * 4 + jj * 2 + kk; // save the morton code and corresponding particle index for sorting // the morton codes hold both the type and the code to sort by both type and position simultaneously d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code; d_map_tree_pid[idx] = idx; } /*! 
* \param d_morton_types Morton code-type keys per particle * \param d_map_tree_pid List to be overwritten with particle ids in ascending order * \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds * \param d_pos Particle positions * \param N Number of local particles * \param nghosts Number of ghost particles * \param box Local simulation box * \param ghost_width Anticipated size of the ghost layer for nonbonded interactions * \param block_size Requested thread block size of kernel launch * * \returns hipSuccess on completion */ hipError_t gpu_nlist_morton_types(uint64_t *d_morton_types, unsigned int *d_map_tree_pid, int *d_morton_conditions, const Scalar4 *d_pos, const unsigned int N, const unsigned int nghosts, const BoxDim& box, const Scalar3 ghost_width, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); hipLaunchKernelGGL(( gpu_nlist_morton_types_kernel), dim3((N+nghosts)/run_block_size + 1), dim3(run_block_size), 0, 0, d_morton_types, d_map_tree_pid, d_morton_conditions, d_pos, N, nghosts, box, ghost_width); return hipSuccess; } /*! 
* \param d_morton_types Morton code-type keys per particle * \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting * \param d_map_tree_pid List of particle ids * \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting * \param d_tmp_storage Temporary storage in device memory * \param tmp_storage_bytes Number of bytes allocated for temporary storage * \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting * \param swap_map Flag to switch real data from auxiliary array to primary array after sorting * \param Ntot Total number of keys to sort * \param n_type_bits Number of bits to check for particle types * * \returns hipSuccess on completion * * \b Implementation * The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check * in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the max 64 bit * keys by only checking up to the MORTON_CODE_BITS + n_type_bits most significant bit. CUB DeviceRadixSort performs * its own tuning at run time. * * Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL, * the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be * allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the * active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map * for whether these data arrays should be swapped. 
*/ hipError_t gpu_nlist_morton_sort(uint64_t *d_morton_types, uint64_t *d_morton_types_alt, unsigned int *d_map_tree_pid, unsigned int *d_map_tree_pid_alt, void *d_tmp_storage, size_t &tmp_storage_bytes, bool &swap_morton, bool &swap_map, const unsigned int Ntot, const unsigned int n_type_bits) { // initialize memory as "double buffered" cub::DoubleBuffer<uint64_t> d_keys(d_morton_types, d_morton_types_alt); cub::DoubleBuffer<unsigned int> d_vals(d_map_tree_pid, d_map_tree_pid_alt); // on the first pass, this just sizes the temporary storage // on the second pass, it actually does the radix sort hipcub::DeviceRadixSort::SortPairs(d_tmp_storage, tmp_storage_bytes, d_keys, d_vals, Ntot, 0, MORTON_CODE_BITS+n_type_bits); // we've only done something to the buffers on the second time when temporary storage is allocated if (d_tmp_storage != NULL) { // mark that the gpu arrays should be flipped if the final result is not in the right array swap_morton = (d_keys.selector == 1); swap_map = (d_vals.selector == 1); } return hipSuccess; } //! Kernel to merge adjacent codes into leaf nodes /*! * \param d_tree_aabbs Flat array holding all AABBs for the tree * \param d_morton_codes_red The Morton codes corresponding to the merged leafs * \param d_tree_parent_sib Parent and sibling indexes for all nodes * \param d_morton_types Morton-code type keys for all particles * \param d_pos Particle positions * \param d_num_per_type Number of particles per type * \param ntypes Number of particle types * \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index) * \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type * \param d_type_head Index to first type and leaf ordered particles by type * \param Ntot Total number of keys to sort * \param nleafs Number of leaf nodes * * \b Implementation * One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB. 
Each thread * first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs * of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that * would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle * array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle * positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of * the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve * for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering. * * AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper * and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see * gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node, * or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child. 
*/ __global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs, uint32_t *d_morton_codes_red, uint2 *d_tree_parent_sib, const uint64_t *d_morton_types, const Scalar4 *d_pos, const unsigned int *d_num_per_type, const unsigned int ntypes, const unsigned int *d_map_tree_pid, const unsigned int *d_leaf_offset, const unsigned int *d_type_head, const unsigned int Ntot, const unsigned int nleafs) { // leaf index const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per leaf if (idx >= nleafs) return; // get what type of leaf I am unsigned int total_bins = 0; int leaf_type = -1; unsigned int max_idx = Ntot; for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type) { total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF; if (idx < total_bins) { leaf_type = cur_type; for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type) { if (d_type_head[next_type]) { max_idx = d_type_head[next_type] - 1; break; // quit out of this inner loop once a match is found } } break; // quit the outer loop } } // get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes" unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type]; unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? 
start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx; // upper also holds the skip value, but we have no idea what this is right now Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ]; upper.w = 0.0f; // lower holds the particle number, we have one already Scalar4 lower = upper; unsigned int npart = 1; for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p) { Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ]; // merge the boxes together if (cur_pos.x < lower.x) lower.x = cur_pos.x; if (cur_pos.x > upper.x) upper.x = cur_pos.x; if (cur_pos.y < lower.y) lower.y = cur_pos.y; if (cur_pos.y > upper.y) upper.y = cur_pos.y; if (cur_pos.z < lower.z) lower.z = cur_pos.z; if (cur_pos.z > upper.z) upper.z = cur_pos.z; ++npart; } d_tree_aabbs[2*idx] = upper; d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1)); // take logical AND with the 30 bit mask for the morton codes to extract just the morton code // no sense swinging around 64 bit integers anymore d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64); // fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen // when this is not the case d_tree_parent_sib[idx] = make_uint2(idx, idx << 1); } /*! 
* \param d_tree_aabbs Flat array holding all AABBs for the tree * \param d_morton_codes_red The Morton codes corresponding to the merged leafs * \param d_tree_parent_sib Parent and sibling indexes for all nodes * \param d_morton_types Morton-code type keys for all particles * \param d_pos Particle positions * \param d_num_per_type Number of particles per type * \param ntypes Number of particle types * \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index) * \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type * \param d_type_head Index to first type and leaf ordered particles by type * \param Ntot Total number of keys to sort * \param nleafs Number of leaf nodes * * \returns hipSuccess on completion */ hipError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs, uint32_t *d_morton_codes_red, uint2 *d_tree_parent_sib, const uint64_t *d_morton_types, const Scalar4 *d_pos, const unsigned int *d_num_per_type, const unsigned int ntypes, const unsigned int *d_map_tree_pid, const unsigned int *d_leaf_offset, const unsigned int *d_type_head, const unsigned int Ntot, const unsigned int nleafs, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); hipLaunchKernelGGL(( gpu_nlist_merge_particles_kernel), dim3(nleafs/run_block_size + 1), dim3(block_size), 0, 0, d_tree_aabbs, d_morton_codes_red, d_tree_parent_sib, d_morton_types, d_pos, d_num_per_type, ntypes, d_map_tree_pid, d_leaf_offset, d_type_head, Ntot, nleafs); return hipSuccess; } //! Computes the longest common prefix between Morton codes /*! 
* \param d_morton_codes Array of Morton codes * \param i First Morton code index * \param j Second Morton code index * \param min_idx The smallest index considered "in range" (inclusive) * \param max_idx The last index considered "in range" (inclusive) * * \returns number of bits shared between the Morton codes of i and j * * delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are * sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If i and j lie outside * of the range of Morton codes corresponding to this tree, then it always returns -1. If the Morton codes for i and j * are identical, then the longest prefix of i and j is used as a tie breaker. */ __device__ inline int delta(const uint32_t *d_morton_codes, unsigned int i, unsigned int j, int min_idx, int max_idx) { if (j > max_idx || j < min_idx) { return -1; } uint32_t first_code = d_morton_codes[i]; uint32_t last_code = d_morton_codes[j]; // if codes match, then use index as tie breaker // the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the // indexes (offset from the start of the node range to make things simpler) if (first_code == last_code) { return (32 + __clz((i-min_idx) ^ (j-min_idx))); } else { return __clz(first_code ^ last_code); } } //! Determines the range of Morton codes that a node covers /*! * \param d_morton_codes Array of Morton codes * \param min_idx The smallest Morton code index considered "in range" (inclusive) * \param max_idx The last Morton code index considered "in range" (inclusive) * \param idx Current node (Morton code) index * * \returns the minimum and maximum leafs covered by this node * \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement. * Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees", * High Performance Graphics (2012). 
*/
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
                                       const int min_idx,
                                       const int max_idx,
                                       const int idx)
    {
    // probe both neighbors to decide which way this node's range extends
    int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
    int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);

    // get direction of the range based on sign
    int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;

    // get minimum prefix
    int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);

    // get maximum prefix by binary search
    int lmax = 2;
    while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
        {
        lmax = lmax << 1;
        }

    unsigned int len = 0;
    unsigned int step = lmax;
    do
        {
        step = step >> 1;
        unsigned int new_len = len + step;
        // note: new_len is unsigned, so when d == -1 the product d*new_len steps
        // backwards via modular arithmetic; out-of-range probes return -1 from delta()
        if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
            len = new_len;
        } while (step > 1);

    // order range based on direction
    uint2 range;
    if (d > 0)
        {
        range.x = idx;
        range.y = idx + len;
        }
    else
        {
        range.x = idx - len;
        range.y = idx;
        }
    return range;
    }

//! Finds the split position in Morton codes covered by a range
/*!
 * \param d_morton_codes Array of Morton codes
 * \param first First leaf node in the range
 * \param last Last leaf node in the range
 *
 * \returns the leaf index corresponding to the split in Morton codes
 * See determineRange for original source of algorithm.
 */
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
                                         const unsigned int first,
                                         const unsigned int last)
    {
    uint32_t first_code = d_morton_codes[first];
    uint32_t last_code = d_morton_codes[last];

    // if codes match, then just split evenly
    if (first_code == last_code)
        return (first + last) >> 1;

    // get the length of the common prefix
    int common_prefix = __clz(first_code ^ last_code);

    // assume split starts at first, and begin binary search
    unsigned int split = first;
    unsigned int step = last - first;
    do
        {
        // exponential decrease (is factor of 2 best?)
        step = (step + 1) >> 1;
        unsigned int new_split = split + step;

        // if proposed split lies within range
        if (new_split < last)
            {
            unsigned int split_code = d_morton_codes[new_split];
            int split_prefix = __clz(first_code ^ split_code);

            // if new split shares a longer number of bits, accept it
            if (split_prefix > common_prefix)
                {
                split = new_split;
                }
            }
        } while (step > 1);

    return split;
    }

//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
 * \param d_tree_parent_sib Parent and sibling for each node in the tree
 * \param d_morton_codes Morton codes for each leaf node
 * \param d_num_per_type Number of particles per type
 * \param ntypes Number of types
 * \param nleafs Number of leafs
 * \param ninternal Number of internal nodes
 *
 * \b Implementation
 * One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
 * as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
 * node is determined, and then its split position is identified. The split identifies the children of the node as
 * another internal node or as a leaf node.
 *
 * The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
 * the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
 * If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
 * child node).
*/
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
                                               const uint32_t *d_morton_codes,
                                               const unsigned int *d_num_per_type,
                                               const unsigned int ntypes,
                                               const unsigned int nleafs,
                                               const unsigned int ninternal)
    {
    // compute the internal node index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per internal node
    if (idx >= ninternal)
        return;

    // get what type of leaf I am
    unsigned int min_idx = 0; // the "0" of the leaf node array
    unsigned int max_idx = 0; // the "N-1" of the leaf node array

    unsigned int node_idx = idx;
    unsigned int origin = 0;
    unsigned int end = 0;

    unsigned int cur_type=0;
    unsigned int active_types=0;
    for (cur_type=0; cur_type < ntypes; ++cur_type)
        {
        // current min index is the previous max index
        min_idx = max_idx;
        // max index adds the number of internal nodes in this type (nleaf - 1)
        const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
        if (cur_nleaf > 0)
            {
            max_idx += cur_nleaf-1;
            ++active_types;
            }

        // we break the loop if we are in range
        if (idx < max_idx)
            {
            // decrement by 1 to get this back into the number we really need
            --active_types;

            // now, we repurpose the min and max index to now correspond to the *leaf* index.
            // the min index is the minimum *leaf* index
            origin = min_idx + active_types;
            end = max_idx + active_types;
            node_idx += active_types;
            break;
            }
        }

    // enact the magical split determining
    uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
    unsigned int first = range.x;
    unsigned int last = range.y;
    unsigned int split = findSplit(d_morton_codes, first, last);

    uint2 children;
    // set the children, shifting ahead by nleafs - cur_type to account for leaf shifting
    // this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
    // an internal node
    children.x = (split == first) ? split : (nleafs - active_types + split);
    children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;

    uint2 parent_sib;
    parent_sib.x = nleafs + idx;

    // encode the sibling as the right child
    parent_sib.y = children.y << 1;
    parent_sib.y |= 1;

    d_tree_parent_sib[children.x] = parent_sib;

    // encode the sibling as the left child
    parent_sib.y = children.x << 1;
    d_tree_parent_sib[children.y] = parent_sib;

    // root is always number "zero", but only it can set its parent / sibling
    // we mark both of these as the root for traversing, since only the root node
    // will be its own sibling
    if (node_idx == origin)
        {
        parent_sib.x = nleafs + idx;
        parent_sib.y = (nleafs + idx) << 1;

        d_tree_parent_sib[nleafs + idx] = parent_sib;
        }
    }

/*!
 * \param d_tree_parent_sib Parent and sibling for each node in the tree
 * \param d_morton_codes Morton codes for each leaf node
 * \param d_num_per_type Number of particles per type
 * \param ntypes Number of types
 * \param nleafs Number of leafs
 * \param ninternal Number of internal nodes
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
                                   const uint32_t *d_morton_codes,
                                   const unsigned int *d_num_per_type,
                                   const unsigned int ntypes,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int block_size)
    {
    // query the kernel's maximum block size once and cache it for later calls
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // one thread per internal node
    hipLaunchKernelGGL(( gpu_nlist_gen_hierarchy_kernel), dim3(ninternal/run_block_size + 1), dim3(run_block_size), 0, 0, d_tree_parent_sib,
                                                                                                                          d_morton_codes,
                                                                                                                          d_num_per_type,
                                                                                                                          ntypes,
                                                                                                                          nleafs,
                                                                                                                          ninternal);
    return hipSuccess;
    }

//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
* \param d_node_locks Atomic flags identifying when node has been visited
 * \param d_tree_aabbs AABB array for all tree nodes
 * \param d_tree_parent_sib Parent and sibling indexes of each node
 * \param ntypes Number of particle types
 * \param nleafs Number of leaf nodes
 *
 * \b Implementation
 * One thread is called per leaf node. The second thread to reach an internal node processes its two children,
 * which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
 * thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
 * processed by threads in the same block.
 *
 * When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
 * how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves to the left
 * child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
 * The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
 * parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
 * indicate traversal should be aborted.
 *
 * This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
 * checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
 * parent accordingly. A child is indicated by bit shifting, and setting the first bit to 1.
*/
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
                                              Scalar4 *d_tree_aabbs,
                                              const uint2 *d_tree_parent_sib,
                                              const unsigned int ntypes,
                                              const unsigned int nleafs)
    {
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    if (idx >= nleafs)
        return;

    // okay, first we start from the leaf and set my bounding box
    Scalar4 cur_upper = d_tree_aabbs[2*idx];
    Scalar4 cur_lower = d_tree_aabbs[2*idx+1];

    // zero the counters for internal nodes
    cur_upper.w = 0.0f;
    cur_lower.w = 0.0f;

    unsigned int cur_node = idx;
    unsigned int lock_key = 0;
    do
        {
        uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
        unsigned int cur_parent = cur_parent_sib.x;

        // if the current sibling is a right child, then the current node is a left child
        bool cur_is_left = (cur_parent_sib.y & 1);

        unsigned int cur_sibling = cur_parent_sib.y >> 1;

        // first we compute the skip for this node always
        // back track up the tree until you find a left child
        // we have a check in place so that we don't stall on the root node
        uint2 backtrack = cur_parent_sib;
        while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
            {
            backtrack = d_tree_parent_sib[backtrack.x];
            }

        // then, the skip is to the sibling of that node, or else to quit
        if (backtrack.y & 1)
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
            }
        else
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1);
            }

        // then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
        // check to make sure the parent is bigger than nleafs, or else the node lock always fails
        // so that we terminate the thread
        // NOTE(review): the second arrival reads the sibling AABB written by the first arrival;
        // correctness relies on the ordering of those global writes relative to the atomicAdd —
        // verify whether a __threadfence() is needed before the atomic.
        lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;

        // process the node
        if (lock_key == 1)
            {
            // compute the max upper bound
            Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
            if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
            if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
            if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
            d_tree_aabbs[2*cur_parent] = cur_upper;

            // compute the min lower bound
            Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
            if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
            if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
            if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;

            // this must always be some internal node, so stash the left child of this node here
            unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
            cur_lower.w = __int_as_scalar( left_child_masked );

            d_tree_aabbs[2*cur_parent+1] = cur_lower;

            // bump the current node one level
            cur_node = cur_parent;
            }
        } while (lock_key == 1);
    }

/*!
 * \param d_node_locks Atomic flags identifying when node has been visited
 * \param d_tree_aabbs AABB array for all tree nodes
 * \param d_tree_parent_sib Parent and sibling indexes of each node
 * \param ntypes Number of particle types
 * \param nleafs Number of leaf nodes
 * \param ninternal Number of internal nodes (one lock per internal node)
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
                                  Scalar4 *d_tree_aabbs,
                                  const uint2 *d_tree_parent_sib,
                                  const unsigned int ntypes,
                                  const unsigned int nleafs,
                                  const unsigned int ninternal,
                                  const unsigned int block_size)
    {
    // locks must start at zero so that the *second* arrival at each internal node processes it
    hipMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);

    // clamp the requested block size to what the kernel supports, caching the query
    // (consistent with the other launch wrappers in this file; an oversized request
    // would otherwise make the launch fail)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void *)gpu_nlist_bubble_aabbs_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // one thread per leaf node
    hipLaunchKernelGGL(( gpu_nlist_bubble_aabbs_kernel), dim3(nleafs/run_block_size + 1), dim3(run_block_size), 0, 0, d_node_locks,
                                                                                                                      d_tree_aabbs,
                                                                                                                      d_tree_parent_sib,
                                                                                                                      ntypes,
                                                                                                                      nleafs);

    return hipSuccess;
    }

//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
 * \param d_leaf_db Particle diameter and body id in leaf order
 * \param d_pos Particle positions
 * \param d_diameter Particle diameters
 * \param d_body Particle body ids
 * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
 * \param Ntot Number of particles owned by this rank
 *
 * \b Implementation
 * One thread per particle. Reads are scattered (gathered through d_map_tree_pid) while writes land
 * in consecutive leaf order, which keeps the stores coalesced.
 */
__global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf,
                                                Scalar2 *d_leaf_db,
                                                const Scalar4 *d_pos,
                                                const Scalar *d_diameter,
                                                const unsigned int *d_body,
                                                const unsigned int *d_map_tree_pid,
                                                const unsigned int Ntot)
    {
    // one thread per leaf-ordered particle slot
    const unsigned int leaf_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (leaf_idx >= Ntot)
        return;

    // map the leaf slot back to the ParticleData index, then gather the position
    const unsigned int pidx = d_map_tree_pid[leaf_idx];
    const Scalar4 postype = d_pos[pidx];

    // stash the particle id in the w component so traversal can recover it later
    d_leaf_xyzf[leaf_idx] = make_scalar4(postype.x, postype.y, postype.z, __int_as_scalar(pidx));

    // pack diameter and body id side by side for one coalesced store
    d_leaf_db[leaf_idx] = make_scalar2(d_diameter[pidx], __int_as_scalar(d_body[pidx]));
    }

/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order
 * \param d_leaf_db Particle diameter and body id in leaf order
 * \param d_pos Particle positions
 * \param d_diameter Particle diameters
 * \param d_body Particle body ids
 * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
 * \param Ntot Number of particles owned by this rank
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf,
                                    Scalar2 *d_leaf_db,
                                    const Scalar4 *d_pos,
                                    const Scalar *d_diameter,
                                    const unsigned int *d_body,
                                    const unsigned int *d_map_tree_pid,
                                    const unsigned int Ntot,
                                    const unsigned int block_size)
    {
    // query the kernel's maximum supported block size once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes kernel_attr;
        hipFuncGetAttributes(&kernel_attr, (const void *)gpu_nlist_move_particles_kernel);
        max_block_size = kernel_attr.maxThreadsPerBlock;
        }

    // never launch wider than the kernel allows
    const int threads_per_block = min(block_size,max_block_size);
    const int num_blocks = Ntot/threads_per_block + 1;

    hipLaunchKernelGGL(( gpu_nlist_move_particles_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_leaf_xyzf,
                                                                                                            d_leaf_db,
                                                                                                            d_pos,
                                                                                                            d_diameter,
                                                                                                            d_body,
                                                                                                            d_map_tree_pid,
                                                                                                            Ntot);

    return hipSuccess;
    }

//! Kernel for traversing tree to generate neighbor list
/*!
* \param d_nlist Neighbor list for writing * \param d_n_neigh Number of neighbors per particle * \param d_last_updated_pos Records current particle positions * \param d_conditions Store overflow condition by type * \param d_Nmax Maximum number of neighbors allocated by type * \param d_head_list Indexes for writing into neighbor list * \param N Number of particles * \param nghosts Number of ghost particles * \param d_map_tree_pid Map leaf index to local particle index * \param d_leaf_offset Offset for reading leaf particles by type * \param d_tree_roots Index for tree root by type * \param d_tree_aabbs Tree AABBs * \param nleafs Total number of leafs * \param d_leaf_xyzf Leaf position-id array * \param d_leaf_db Leaf diameter-body array * \param d_pos Particle positions * \param d_image_list Translation vectors to check for traversal * \param nimages Number of translation vectors to check * \param d_r_cut Cutoff radius by type r_cut(i,j) * \param r_buff Buffer around cutoff radius * \param max_diam Maximum diameter attained by a particle for diameter shifting * \param ntypes Number of particle types * * \b Implementation * One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData * order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the * tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless * search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node. * If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are * tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB * is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the * traversal is complete. 
*/
template<unsigned char flags>
__global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist,
                                               unsigned int *d_n_neigh,
                                               Scalar4 *d_last_updated_pos,
                                               unsigned int *d_conditions,
                                               const unsigned int *d_Nmax,
                                               const unsigned int *d_head_list,
                                               const unsigned int N,
                                               const unsigned int nghosts,
                                               const unsigned int *d_map_tree_pid,
                                               const unsigned int *d_leaf_offset,
                                               const unsigned int *d_tree_roots,
                                               const Scalar4 *d_tree_aabbs,
                                               const unsigned int nleafs,
                                               const Scalar4 *d_leaf_xyzf,
                                               const Scalar2 *d_leaf_db,
                                               const Scalar4 *d_pos,
                                               const Scalar3 *d_image_list,
                                               const unsigned int nimages,
                                               const Scalar *d_r_cut,
                                               const Scalar r_buff,
                                               const Scalar max_diam,
                                               const unsigned int ntypes)
    {
    // unpack the compile-time feature flags: bit 0 = body filtering, bit 1 = diameter shifting
    bool filter_body = flags & 1;
    bool diameter_shift = flags & 2;

    // cache the r_listsq parameters into shared memory
    const Index2D typpair_idx(ntypes);
    const unsigned int num_typ_parameters = typpair_idx.getNumElements();

    // shared data for per type pair parameters
    extern __shared__ unsigned char s_data[];

    // pointer for the r_listsq data
    Scalar *s_r_list = (Scalar *)(&s_data[0]);
    unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]);
    unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]);

    // load in the per type pair r_list (strided so any block size covers all entries)
    for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x)
        {
        if (cur_offset + threadIdx.x < num_typ_parameters)
            {
            Scalar r_cut = d_r_cut[cur_offset + threadIdx.x];
            // force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable
            s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? r_cut+r_buff : Scalar(-1.0);
            }

        if (cur_offset + threadIdx.x < ntypes)
            {
            s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x];
            s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x];
            }
        }
    __syncthreads();

    // compute the particle index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // quit now if this thread is processing past the end of the leaf list
    if (idx >= (N+nghosts))
        return;

    // read in the current position (threads are in leaf order; map back to the particle id)
    unsigned int my_pidx = d_map_tree_pid[idx];
    // we only process particles owned by this processor for neighbors
    if (my_pidx >= N)
        return;

    const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx);
    const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z);
    const unsigned int type_i = __scalar_as_int(postype_i.w);

    // fetch the diameter and body out of the leaf texture since it's bound anyway
    const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx);
    const Scalar diam_i = db_i.x;
    const unsigned int body_i = __scalar_as_int(db_i.y);

    const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx);

    unsigned int n_neigh_i = 0;
    for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type)
        {
        // Check primary box
        const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)];

        // Skip this tree type if it is not needed
        if (r_cut_i <= Scalar(0.0))
            continue;

        // stash the r_cutsq before any diameter shifting
        const Scalar r_cutsq_i = r_cut_i*r_cut_i;

        // the rlist to use for the AABB search has to be at least as big as the biggest diameter
        Scalar r_list_i = r_cut_i;
        if (diameter_shift)
            r_list_i += max_diam - Scalar(1.0);

        const unsigned int cur_tree_root = d_tree_roots[cur_pair_type];
        // skip this type if we don't have it
        if (cur_tree_root == NLIST_GPU_INVALID_NODE)
            continue;

        for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image)
            {
            // build the query AABB around the (possibly translated) particle position
            const Scalar3 pos_i_image = pos_i + d_image_list[cur_image];
            const Scalar3 aabb_upper = make_scalar3(pos_i_image.x + r_list_i,
                                                    pos_i_image.y + r_list_i,
                                                    pos_i_image.z + r_list_i);
            const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i,
                                                    pos_i_image.y - r_list_i,
                                                    pos_i_image.z - r_list_i);

            // stackless search: follow left children on overlap, ropes otherwise
            int cur_node_idx = cur_tree_root;
            while (cur_node_idx > -1)
                {
                // each node stores (upper bound, rope) and (lower bound, masked child/particle count)
                const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx);
                const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1);

                if (!(aabb_upper.x < lower_np.x
                      || aabb_lower.x > upper_rope.x
                      || aabb_upper.y < lower_np.y
                      || aabb_lower.y > upper_rope.y
                      || aabb_upper.z < lower_np.z
                      || aabb_lower.z > upper_rope.z))
                    {
                    const unsigned int np_child_masked = __scalar_as_int(lower_np.w);

                    if(!(np_child_masked & 1))
                        {
                        // leaf node
                        // all leaves must have at least 1 particle, so we can use this to decide
                        const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type];
                        const unsigned int n_part = np_child_masked >> 1;
                        for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p)
                            {
                            // neighbor j
                            const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p);
                            const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z);
                            const unsigned int j = __scalar_as_int(cur_xyzf.w);

                            const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p);
                            const Scalar diam_j = cur_db.x;
                            const unsigned int body_j = __scalar_as_int(cur_db.y);

                            // exclude self, and optionally same-body pairs
                            bool excluded = (my_pidx == j);

                            if (filter_body && body_i != 0xffffffff)
                                excluded = excluded | (body_i == body_j);

                            if (!excluded)
                                {
                                // now we can trim down the actual particles based on diameter
                                // compute the shift for the cutoff if not excluded
                                Scalar sqshift = Scalar(0.0);
                                if (diameter_shift)
                                    {
                                    const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0);
                                    // r^2 < (r_list + delta)^2
                                    // r^2 < r_listsq + delta^2 + 2*r_list*delta
                                    sqshift = (delta + Scalar(2.0) * r_cut_i) * delta;
                                    }

                                // compute distance and wrap back into box
                                Scalar3 drij = pos_j - pos_i_image;
                                Scalar dr2 = dot(drij,drij);

                                if (dr2 <= (r_cutsq_i + sqshift))
                                    {
                                    // only write while there is room; overflow is reported via d_conditions below
                                    if (n_neigh_i < s_Nmax[type_i])
                                        {
                                        d_nlist[nlist_head_i + n_neigh_i] = j;
                                        }
                                    ++n_neigh_i;
                                    }
                                }
                            }

                        // leaf nodes always move to their rope
                        cur_node_idx = __scalar_as_int(upper_rope.w);
                        }
                    else
                        {
                        // internal node, take left child
                        cur_node_idx = (np_child_masked >> 1);
                        }
                    }
                else
                    {
                    cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead
                    }
                } // end stackless search
            } // end loop over images
        } // end loop over pair types

    // could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking.
    d_n_neigh[my_pidx] = n_neigh_i;
    // NOTE(review): type_i is an unsigned int, so __scalar_as_int(type_i) value-converts through
    // Scalar rather than bit-casting — verify this shouldn't be __int_as_scalar(type_i)
    d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __scalar_as_int(type_i));

    // update the number of neighbors for this type if allocated memory is exceeded
    if (n_neigh_i >= s_Nmax[type_i])
        atomicMax(&d_conditions[type_i], n_neigh_i);
    }

/*!
 * \param d_nlist Neighbor list for writing
 * \param d_n_neigh Number of neighbors per particle
 * \param d_last_updated_pos Records current particle positions
 * \param d_conditions Store overflow condition by type
 * \param d_Nmax Maximum number of neighbors allocated by type
 * \param d_head_list Indexes for writing into neighbor list
 * \param N Number of particles
 * \param nghosts Number of ghost particles
 * \param d_map_tree_pid Map leaf index to local particle index
 * \param d_leaf_offset Offset for reading leaf particles by type
 * \param d_tree_roots Index for tree root by type
 * \param d_tree_aabbs Tree AABBs
 * \param nleafs Total number of leafs
 * \param d_leaf_xyzf Leaf position-id array
 * \param d_leaf_db Leaf diameter-body array
 * \param d_pos Particle positions
 * \param d_image_list Translation vectors to check for traversal
 * \param nimages Number of translation vectors to check
 * \param d_r_cut Cutoff radius by type r_cut(i,j)
 * \param r_buff Buffer around cutoff radius
 * \param max_diam Maximum diameter attained by a particle
for diameter shifting
 * \param ntypes Number of particle types
 * \param filter_body True if body filtering is enabled
 * \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters
 * \param compute_capability Compute capability of the GPU (in 20, 30, 35 format)
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 * \returns hipError_t on failure to texture bind
 *
 * \note Kernel calls are templated on body filtering and diameter shifting for optimization.
 * \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost
 *       particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these
 *       particles, and this inefficiency is assumed to be relatively small.
 */
hipError_t gpu_nlist_traverse_tree(unsigned int *d_nlist,
                                   unsigned int *d_n_neigh,
                                   Scalar4 *d_last_updated_pos,
                                   unsigned int *d_conditions,
                                   const unsigned int *d_Nmax,
                                   const unsigned int *d_head_list,
                                   const unsigned int N,
                                   const unsigned int nghosts,
                                   const unsigned int *d_map_tree_pid,
                                   const unsigned int *d_leaf_offset,
                                   const unsigned int *d_tree_roots,
                                   const Scalar4 *d_tree_aabbs,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int nnodes,
                                   const Scalar4 *d_leaf_xyzf,
                                   const Scalar2 *d_leaf_db,
                                   const Scalar4 *d_pos,
                                   const Scalar3 *d_image_list,
                                   const unsigned int nimages,
                                   const Scalar *d_r_cut,
                                   const Scalar r_buff,
                                   const Scalar max_diam,
                                   const unsigned int ntypes,
                                   bool filter_body,
                                   bool diameter_shift,
                                   const unsigned int compute_capability,
                                   const unsigned int block_size)
    {
    // shared memory = r_list + Nmax (the kernel also carves out s_leaf_offset, hence 2*ntypes ints)
    Index2D typpair_idx(ntypes);
    unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes;

    // bind the neighborlist texture (texture references are only needed below compute 3.5)
    if (compute_capability < 35)
        {
        pdata_pos_tex.normalized = false;
        pdata_pos_tex.filterMode = hipFilterModePoint;
        hipError_t error = hipBindTexture(0, pdata_pos_tex, d_pos, sizeof(Scalar4)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        leaf_xyzf_tex.normalized = false;
        leaf_xyzf_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        leaf_db_tex.normalized = false;
        leaf_db_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts));
        if (error != hipSuccess)
            return error;

        aabb_node_bounds_tex.normalized = false;
        aabb_node_bounds_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes);
        if (error != hipSuccess)
            return error;

        head_list_tex.normalized = false;
        head_list_tex.filterMode = hipFilterModePoint;
        error = hipBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N);
        if (error != hipSuccess)
            return error;
        }

    // dispatch to the template specialization matching (filter_body, diameter_shift);
    // each branch caches its own max block size, since each template instantiation may differ
    if (!filter_body && !diameter_shift)
        {
        static unsigned int max_block_size = UINT_MAX;
        if (max_block_size == UINT_MAX)
            {
            hipFuncAttributes attr;
            hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<0>);
            max_block_size = attr.maxThreadsPerBlock;
            }
        int run_block_size = min(block_size,max_block_size);
        int nblocks = (N+nghosts)/run_block_size + 1;
        hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<0>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
                                                                                 d_n_neigh,
                                                                                 d_last_updated_pos,
                                                                                 d_conditions,
                                                                                 d_Nmax,
                                                                                 d_head_list,
                                                                                 N,
                                                                                 nghosts,
                                                                                 d_map_tree_pid,
                                                                                 d_leaf_offset,
                                                                                 d_tree_roots,
                                                                                 d_tree_aabbs,
                                                                                 nleafs,
                                                                                 d_leaf_xyzf,
                                                                                 d_leaf_db,
                                                                                 d_pos,
                                                                                 d_image_list,
                                                                                 nimages,
                                                                                 d_r_cut,
                                                                                 r_buff,
                                                                                 max_diam,
                                                                                 ntypes);
        }
    else if (filter_body && !diameter_shift)
        {
        static unsigned int max_block_size = UINT_MAX;
        if (max_block_size == UINT_MAX)
            {
            hipFuncAttributes attr;
            hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<1>);
            max_block_size = attr.maxThreadsPerBlock;
            }
        int run_block_size = min(block_size,max_block_size);
        int nblocks = (N+nghosts)/run_block_size + 1;
        hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<1>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
                                                                                 d_n_neigh,
                                                                                 d_last_updated_pos,
                                                                                 d_conditions,
                                                                                 d_Nmax,
                                                                                 d_head_list,
                                                                                 N,
                                                                                 nghosts,
                                                                                 d_map_tree_pid,
                                                                                 d_leaf_offset,
                                                                                 d_tree_roots,
                                                                                 d_tree_aabbs,
                                                                                 nleafs,
                                                                                 d_leaf_xyzf,
                                                                                 d_leaf_db,
                                                                                 d_pos,
                                                                                 d_image_list,
                                                                                 nimages,
                                                                                 d_r_cut,
                                                                                 r_buff,
                                                                                 max_diam,
                                                                                 ntypes);
        }
    else if (!filter_body && diameter_shift)
        {
        static unsigned int max_block_size = UINT_MAX;
        if (max_block_size == UINT_MAX)
            {
            hipFuncAttributes attr;
            hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<2>);
            max_block_size = attr.maxThreadsPerBlock;
            }
        int run_block_size = min(block_size,max_block_size);
        int nblocks = (N+nghosts)/run_block_size + 1;
        hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<2>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
                                                                                 d_n_neigh,
                                                                                 d_last_updated_pos,
                                                                                 d_conditions,
                                                                                 d_Nmax,
                                                                                 d_head_list,
                                                                                 N,
                                                                                 nghosts,
                                                                                 d_map_tree_pid,
                                                                                 d_leaf_offset,
                                                                                 d_tree_roots,
                                                                                 d_tree_aabbs,
                                                                                 nleafs,
                                                                                 d_leaf_xyzf,
                                                                                 d_leaf_db,
                                                                                 d_pos,
                                                                                 d_image_list,
                                                                                 nimages,
                                                                                 d_r_cut,
                                                                                 r_buff,
                                                                                 max_diam,
                                                                                 ntypes);
        }
    else if (filter_body && diameter_shift)
        {
        static unsigned int max_block_size = UINT_MAX;
        if (max_block_size == UINT_MAX)
            {
            hipFuncAttributes attr;
            hipFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<3>);
            max_block_size = attr.maxThreadsPerBlock;
            }
        int run_block_size = min(block_size,max_block_size);
        int nblocks = (N+nghosts)/run_block_size + 1;
        hipLaunchKernelGGL(( gpu_nlist_traverse_tree_kernel<3>), dim3(nblocks), dim3(run_block_size), shared_size, 0, d_nlist,
                                                                                 d_n_neigh,
                                                                                 d_last_updated_pos,
                                                                                 d_conditions,
                                                                                 d_Nmax,
                                                                                 d_head_list,
                                                                                 N,
                                                                                 nghosts,
                                                                                 d_map_tree_pid,
                                                                                 d_leaf_offset,
                                                                                 d_tree_roots,
                                                                                 d_tree_aabbs,
                                                                                 nleafs,
                                                                                 d_leaf_xyzf,
                                                                                 d_leaf_db,
                                                                                 d_pos,
                                                                                 d_image_list,
                                                                                 nimages,
                                                                                 d_r_cut,
                                                                                 r_buff,
                                                                                 max_diam,
                                                                                 ntypes);
        }

    // unbind the textures
    if (compute_capability < 35)
        {
        hipError_t error = hipUnbindTexture(pdata_pos_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(leaf_xyzf_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(leaf_db_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(aabb_node_bounds_tex);
        if (error != hipSuccess)
            return error;

        error = hipUnbindTexture(head_list_tex);
        if (error != hipSuccess)
            return error;
        }

    return hipSuccess;
    }

//! Kernel to find divisions between particle types in sorted order
/*!
 * \param d_type_head Index to first type in leaf ordered particles by type
 * \param d_pos Particle positions
 * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
 * \param N Total number of particles on rank (including ghosts)
 *
 * The starting index for each type of particles is the first particle where the left neighbor is not of the same type.
 */
__global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head,
                                               const Scalar4 *d_pos,
                                               const unsigned int *d_map_tree_pid,
                                               const unsigned int N)
    {
    // compute the particle index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per particle
    if (idx >= N)
        return;

    const unsigned int cur_pidx = d_map_tree_pid[idx];
    // get type of the current particle
    const Scalar4 cur_postype = d_pos[cur_pidx];
    const unsigned int cur_type = __scalar_as_int(cur_postype.w);

    // all particles except for the first one should look left
    if (idx > 0)
        {
        const unsigned int left_pidx = d_map_tree_pid[idx - 1];

        // get type of the particle to my left
        const Scalar4 left_postype = d_pos[left_pidx];
        const unsigned int left_type = __scalar_as_int(left_postype.w);

        // if the left has a different type, then this is a type boundary, and the type starts at the current thread index
        if (left_type != cur_type)
            {
            d_type_head[cur_type] = idx + 1; // offset the index +1 so that we can use 0 to mean "none of this found"
            }
        }
    else // the first particle just sets its type to be 1
        {
        d_type_head[cur_type] = 1;
        }
    }

/*!
* \param d_type_head Index to first type in leaf ordered particles by type
 * \param d_pos Particle positions
 * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id
 * \param N Total number of particles on rank (including ghosts)
 * \param ntypes Number of types
 * \param block_size Requested thread block size
 *
 * \returns hipSuccess on completion
 */
hipError_t gpu_nlist_init_count(unsigned int *d_type_head,
                                const Scalar4 *d_pos,
                                const unsigned int *d_map_tree_pid,
                                const unsigned int N,
                                const unsigned int ntypes,
                                const unsigned int block_size)
    {
    // determine (once) the widest block the division kernel supports
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes kern_attr;
        hipFuncGetAttributes(&kern_attr, (const void *)gpu_nlist_get_divisions_kernel);
        max_block_size = kern_attr.maxThreadsPerBlock;
        }
    const int threads = min(block_size,max_block_size);

    // clear the head list first: a value of 0 encodes "type not found"
    hipMemset(d_type_head, 0, sizeof(unsigned int)*ntypes);

    // one thread per particle marks the start of each type's span in sorted order
    hipLaunchKernelGGL(( gpu_nlist_get_divisions_kernel), dim3(N/threads + 1), dim3(threads), 0, 0, d_type_head,
                                                                                                    d_pos,
                                                                                                    d_map_tree_pid,
                                                                                                    N);

    return hipSuccess;
    }

#undef MORTON_CODE_BITS
#undef MORTON_TYPE_MASK_64
#undef MORTON_CODE_N_BINS
71ba4e92744e9b5ceacc8852f385664ac4585c32.cu
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: mphoward

#include "NeighborListGPUTree.cuh"
#include "hoomd/TextureTools.h"
#include "hoomd/extern/cub/cub.cuh"

#define MORTON_CODE_BITS 30 //!< Length of the Morton code in bits (k = 10 bits per direction)
#define MORTON_CODE_N_BINS 1024 //!< Number of bins (2^10) per direction to generate 30 bit Morton codes
#define MORTON_TYPE_MASK_64 0x000000003fffffffu //!< 64 bit mask to turn morton code-type back to morton code

/*! \file NeighborListGPUTree.cu
    \brief Defines GPU kernel code for neighbor list tree traversal on the GPU
*/

//! Texture for reading particle positions
scalar4_tex_t pdata_pos_tex;
//! Texture for reading leaf data
scalar4_tex_t leaf_xyzf_tex;
//! Texture for the diameter / body
scalar2_tex_t leaf_db_tex;
//! Texture for reading node upper and lower bounds
scalar4_tex_t aabb_node_bounds_tex;
//! Texture for the head list
texture<unsigned int, 1, cudaReadModeElementType> head_list_tex;

//! Expands a 10-bit integer into 30 bits by inserting 2 zeros after each bit.
/*!
 * \param v unsigned integer with 10 bits set
 * \returns The integer expanded with two zeros interleaved between bits
 * http://devblogs.nvidia.com/parallelforall/thinking-parallel-part-iii-tree-construction-gpu/
 */
__device__ inline unsigned int expandBits(unsigned int v)
    {
    // multiply-and-mask bit swizzle: each step doubles the spacing between the occupied bits,
    // ending with one data bit in every third position (ready to interleave with two other axes)
    v = (v * 0x00010001u) & 0xFF0000FFu;
    v = (v * 0x00000101u) & 0x0F00F00Fu;
    v = (v * 0x00000011u) & 0xC30C30C3u;
    v = (v * 0x00000005u) & 0x49249249u;
    return v;
    }

//! Assigns the Morton code-type key for each particle on this processor
/*!
 * \param d_morton_types Morton code-type keys per particle
 * \param d_map_tree_pid List to be overwritten with particle ids in ascending order
 * \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds
 * \param d_pos Particle positions
 * \param N Number of local particles
 * \param nghosts Number of ghost particles
 * \param box Local simulation box
 * \param ghost_width Anticipated size of the ghost layer for nonbonded interactions
 *
 * \b Implementation
 * A sorting key is generated for each particle by determining the 30 bit Morton code for each particle, and then
 * concatenating onto the type. Both the Morton code and the type are 32 bit integers, so the concatenation is stored
 * compactly in a 64 bit integer morton_type = (type << 30) + morton code. In this way, a lexicographic sort will
 * sort first by type, then by morton code. The corresponding particle id (thread index) is stashed into d_map_tree_pid
 * to track particles after sorting.
 */
__global__ void gpu_nlist_morton_types_kernel(uint64_t *d_morton_types,
                                              unsigned int *d_map_tree_pid,
                                              int *d_morton_conditions,
                                              const Scalar4 *d_pos,
                                              const unsigned int N,
                                              const unsigned int nghosts,
                                              const BoxDim box,
                                              const Scalar3 ghost_width)
    {
    // compute the particle index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per particle (local + ghost)
    if (idx >= N+nghosts)
        return;

    // acquire particle data
    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    const unsigned int type = __scalar_as_int(postype.w);

    // get position in simulation box as a fraction of the box + ghost layer
    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos,ghost_width);

    /* check if the particle is inside the unit cell + ghost layer in all dimensions
     * this tolerance is small enough that when we multiply by the morton code bin size, we are still in range
     * we silently ignore ghosts outside of this width, and instead deal with that special case below
     * where extra ghosts are communicated (e.g. for bonded interactions)
     */
    if (((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
         (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
         (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001))) && idx < N)
        {
        // flag the error with the (1-indexed) particle id; ghosts never trip this condition
        atomicMax(d_morton_conditions,idx+1);
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * MORTON_CODE_N_BINS);
    int jb = (int)(f.y * MORTON_CODE_N_BINS);
    int kb = (int)(f.z * MORTON_CODE_N_BINS);

    if (!periodic.x) // ghosts exist and may be past layer width
        {
        // handle special cases where random ghosts are beyond the expected layer
        // by just rounding to the nearest edge
        if (ib < 0)
            {
            ib = 0;
            }
        else if (ib >= MORTON_CODE_N_BINS)
            {
            ib = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (ib == MORTON_CODE_N_BINS) // some particles lie exactly on the edge, floor them to zero
        {
        ib = 0;
        }

    // do as for x in y
    if (!periodic.y)
        {
        if (jb < 0)
            {
            jb = 0;
            }
        else if (jb >= MORTON_CODE_N_BINS)
            {
            jb = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (jb == MORTON_CODE_N_BINS)
        {
        jb = 0;
        }

    // do as for y in z
    if (!periodic.z)
        {
        if (kb < 0)
            {
            kb = 0;
            }
        else if (kb >= MORTON_CODE_N_BINS)
            {
            kb = MORTON_CODE_N_BINS - 1;
            }
        }
    else if (kb == MORTON_CODE_N_BINS)
        {
        kb = 0;
        }

    // inline call to some bit swizzling arithmetic: interleave the three 10-bit bin indexes
    unsigned int ii = expandBits((unsigned int)ib);
    unsigned int jj = expandBits((unsigned int)jb);
    unsigned int kk = expandBits((unsigned int)kb);
    unsigned int morton_code = ii * 4 + jj * 2 + kk;

    // save the morton code and corresponding particle index for sorting
    // the morton codes hold both the type and the code to sort by both type and position simultaneously
    d_morton_types[idx] = (((uint64_t)type) << MORTON_CODE_BITS) + (uint64_t)morton_code;
    d_map_tree_pid[idx] = idx;
    }

/*!
* \param d_morton_types Morton code-type keys per particle * \param d_map_tree_pid List to be overwritten with particle ids in ascending order * \param d_morton_conditions Flag if a local particle (not a ghost) is detected out of bounds * \param d_pos Particle positions * \param N Number of local particles * \param nghosts Number of ghost particles * \param box Local simulation box * \param ghost_width Anticipated size of the ghost layer for nonbonded interactions * \param block_size Requested thread block size of kernel launch * * \returns cudaSuccess on completion */ cudaError_t gpu_nlist_morton_types(uint64_t *d_morton_types, unsigned int *d_map_tree_pid, int *d_morton_conditions, const Scalar4 *d_pos, const unsigned int N, const unsigned int nghosts, const BoxDim& box, const Scalar3 ghost_width, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_morton_types_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); gpu_nlist_morton_types_kernel<<<(N+nghosts)/run_block_size + 1, run_block_size>>>(d_morton_types, d_map_tree_pid, d_morton_conditions, d_pos, N, nghosts, box, ghost_width); return cudaSuccess; } /*! 
 * \param d_morton_types Morton code-type keys per particle
 * \param d_morton_types_alt Auxiliary array of equal size to d_morton_types for double buffered sorting
 * \param d_map_tree_pid List of particle ids
 * \param d_map_tree_pid_alt Auxiliary array of equal size to d_map_tree_pid for double buffered sorting
 * \param d_tmp_storage Temporary storage in device memory
 * \param tmp_storage_bytes Number of bytes allocated for temporary storage
 * \param swap_morton Flag to switch real data from auxiliary array to primary array after sorting
 * \param swap_map Flag to switch real data from auxiliary array to primary array after sorting
 * \param Ntot Total number of keys to sort
 * \param n_type_bits Number of bits to check for particle types
 *
 * \returns cudaSuccess on completion
 *
 * \b Implementation
 * The CUB library is used for device-wide radix sorting. Radix sorting is O(kN) where k is the number of bits to check
 * in an unsigned integer key, and N is the number of keys. We restrict the number of bits checked in the max 64 bit
 * keys by only checking up to the MORTON_CODE_BITS + n_type_bits most significant bit. CUB DeviceRadixSort performs
 * its own tuning at run time.
 *
 * Because CUB requires temporary storage, this function must be called twice. First, when \a d_tmp_storage is NULL,
 * the number of bytes required for temporary storage is saved in \a tmp_storage_bytes. This memory must then be
 * allocated in \a d_tmp_storage. On the second call, the radix sort is performed. Because the radix sort may put the
 * active (sorted) buffer in either slot of the DoubleBuffer, a boolean flag is set in \a swap_morton and \a swap_map
 * for whether these data arrays should be swapped.
 */
cudaError_t gpu_nlist_morton_sort(uint64_t *d_morton_types,
                                  uint64_t *d_morton_types_alt,
                                  unsigned int *d_map_tree_pid,
                                  unsigned int *d_map_tree_pid_alt,
                                  void *d_tmp_storage,
                                  size_t &tmp_storage_bytes,
                                  bool &swap_morton,
                                  bool &swap_map,
                                  const unsigned int Ntot,
                                  const unsigned int n_type_bits)
    {
    // initialize memory as "double buffered"
    cub::DoubleBuffer<uint64_t> d_keys(d_morton_types, d_morton_types_alt);
    cub::DoubleBuffer<unsigned int> d_vals(d_map_tree_pid, d_map_tree_pid_alt);

    // on the first pass, this just sizes the temporary storage
    // on the second pass, it actually does the radix sort
    cub::DeviceRadixSort::SortPairs(d_tmp_storage,
                                    tmp_storage_bytes,
                                    d_keys,
                                    d_vals,
                                    Ntot,
                                    0,
                                    MORTON_CODE_BITS+n_type_bits);

    // we've only done something to the buffers on the second time when temporary storage is allocated
    if (d_tmp_storage != NULL)
        {
        // mark that the gpu arrays should be flipped if the final result is not in the right array
        // (CUB's DoubleBuffer.selector tells which slot holds the sorted data)
        swap_morton = (d_keys.selector == 1);
        swap_map = (d_vals.selector == 1);
        }

    return cudaSuccess;
    }

//! Kernel to merge adjacent codes into leaf nodes
/*!
 * \param d_tree_aabbs Flat array holding all AABBs for the tree
 * \param d_morton_codes_red The Morton codes corresponding to the merged leafs
 * \param d_tree_parent_sib Parent and sibling indexes for all nodes
 * \param d_morton_types Morton-code type keys for all particles
 * \param d_pos Particle positions
 * \param d_num_per_type Number of particles per type
 * \param ntypes Number of particle types
 * \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index)
 * \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type
 * \param d_type_head Index to first type and leaf ordered particles by type
 * \param Ntot Total number of keys to sort
 * \param nleafs Number of leaf nodes
 *
 * \b Implementation
 * One thread per leaf is called, and is responsible for merging NLIST_GPU_PARTICLES_PER_LEAF into an AABB.
Each thread
 * first determines what type of leaf particle it is operating on by calculating and iterating on the number of leafs
 * of each type. Then, the starting index is determined by subtracting d_leaf_offset[type] from the starting index that
 * would be set in a nleaf x NLIST_GPU_PARTICLES_PER_LEAF array. The reason for this complexity is that the leaf particle
 * array is not permitted to have any "holes" in it for faster traversal. The AABB is merged from the particle
 * positions, and a Morton code is assigned to this AABB for determining tree hierarchy based on the Morton code of
 * the first particle in the leaf. Although this does not necessarily generate the best ordering along the Z order curve
 * for the newly merged leafs, it does guarantee that the leaf Morton codes are still in lexicographic ordering.
 *
 * AABBs are stored as two Scalar4s in a flat array. The first three coordinates of each Scalar4 correspond to the upper
 * and lower bounds of the AABB. The last value of the upper AABB will hold a "rope" for traversing the tree (see
 * gpu_nlist_bubble_aabbs_kernel), while the last value of the lower AABB holds the number of particles for a leaf node,
 * or the left child for an internal node. This is determined by setting a bit to mark this value as a rope or as child.
 */
__global__ void gpu_nlist_merge_particles_kernel(Scalar4 *d_tree_aabbs,
                                                 uint32_t *d_morton_codes_red,
                                                 uint2 *d_tree_parent_sib,
                                                 const uint64_t *d_morton_types,
                                                 const Scalar4 *d_pos,
                                                 const unsigned int *d_num_per_type,
                                                 const unsigned int ntypes,
                                                 const unsigned int *d_map_tree_pid,
                                                 const unsigned int *d_leaf_offset,
                                                 const unsigned int *d_type_head,
                                                 const unsigned int Ntot,
                                                 const unsigned int nleafs)
    {
    // leaf index
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per leaf
    if (idx >= nleafs)
        return;

    // get what type of leaf I am by accumulating the per-type leaf counts until my index falls in range
    unsigned int total_bins = 0;
    int leaf_type = -1;
    unsigned int max_idx = Ntot;
    for (unsigned int cur_type=0; leaf_type == -1 && cur_type < ntypes; ++cur_type)
        {
        total_bins += (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;

        if (idx < total_bins)
            {
            leaf_type = cur_type;
            // the last particle of this type is one before the head of the next non-empty type
            for (unsigned int next_type=cur_type+1; next_type < ntypes; ++next_type)
                {
                if (d_type_head[next_type])
                    {
                    max_idx = d_type_head[next_type] - 1;
                    break; // quit out of this inner loop once a match is found
                    }
                }
            break; // quit the outer loop
            }
        }

    // get the starting particle index assuming naive leaf structure, and then subtract offset to eliminate "holes"
    unsigned int start_idx = idx*NLIST_GPU_PARTICLES_PER_LEAF - d_leaf_offset[leaf_type];
    unsigned int end_idx = (max_idx - start_idx > NLIST_GPU_PARTICLES_PER_LEAF) ? start_idx + NLIST_GPU_PARTICLES_PER_LEAF : max_idx;

    // upper also holds the skip value, but we have no idea what this is right now
    Scalar4 upper = d_pos[ d_map_tree_pid[start_idx] ];
    upper.w = 0.0f;

    // lower holds the particle number, we have one already
    Scalar4 lower = upper;
    unsigned int npart = 1;

    // merge the remaining particles of this leaf into the AABB
    for (unsigned int cur_p=start_idx+1; cur_p < end_idx; ++cur_p)
        {
        Scalar4 cur_pos = d_pos[ d_map_tree_pid[cur_p] ];

        // merge the boxes together
        if (cur_pos.x < lower.x) lower.x = cur_pos.x;
        if (cur_pos.x > upper.x) upper.x = cur_pos.x;

        if (cur_pos.y < lower.y) lower.y = cur_pos.y;
        if (cur_pos.y > upper.y) upper.y = cur_pos.y;

        if (cur_pos.z < lower.z) lower.z = cur_pos.z;
        if (cur_pos.z > upper.z) upper.z = cur_pos.z;

        ++npart;
        }

    // leaf nodes mark the particle count in the lower.w slot, shifted left (low bit = 0 flags "leaf")
    d_tree_aabbs[2*idx] = upper;
    d_tree_aabbs[2*idx + 1] = make_scalar4(lower.x, lower.y, lower.z, __int_as_scalar(npart << 1));

    // take logical AND with the 30 bit mask for the morton codes to extract just the morton code
    // no sense swinging around 64 bit integers anymore
    d_morton_codes_red[idx] = (unsigned int)(d_morton_types[start_idx] & MORTON_TYPE_MASK_64);

    // fill the parent/sib relationships as if everything is a single leaf at first, to be overridden by hierarchy gen
    // when this is not the case
    d_tree_parent_sib[idx] = make_uint2(idx, idx << 1);
    }

/*!
* \param d_tree_aabbs Flat array holding all AABBs for the tree * \param d_morton_codes_red The Morton codes corresponding to the merged leafs * \param d_tree_parent_sib Parent and sibling indexes for all nodes * \param d_morton_types Morton-code type keys for all particles * \param d_pos Particle positions * \param d_num_per_type Number of particles per type * \param ntypes Number of particle types * \param d_map_tree_pid Sorted particle order (maps local index to ParticleData index) * \param d_leaf_offset Amount to subtract from the expected leaf starting index to make an array with no holes by type * \param d_type_head Index to first type and leaf ordered particles by type * \param Ntot Total number of keys to sort * \param nleafs Number of leaf nodes * * \returns cudaSuccess on completion */ cudaError_t gpu_nlist_merge_particles(Scalar4 *d_tree_aabbs, uint32_t *d_morton_codes_red, uint2 *d_tree_parent_sib, const uint64_t *d_morton_types, const Scalar4 *d_pos, const unsigned int *d_num_per_type, const unsigned int ntypes, const unsigned int *d_map_tree_pid, const unsigned int *d_leaf_offset, const unsigned int *d_type_head, const unsigned int Ntot, const unsigned int nleafs, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_merge_particles_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); gpu_nlist_merge_particles_kernel<<<nleafs/run_block_size + 1, block_size>>>(d_tree_aabbs, d_morton_codes_red, d_tree_parent_sib, d_morton_types, d_pos, d_num_per_type, ntypes, d_map_tree_pid, d_leaf_offset, d_type_head, Ntot, nleafs); return cudaSuccess; } //! Computes the longest common prefix between Morton codes /*! 
 * \param d_morton_codes Array of Morton codes
 * \param i First Morton code index
 * \param j Second Morton code index
 * \param min_idx The smallest index considered "in range" (inclusive)
 * \param max_idx The last index considered "in range" (inclusive)
 *
 * \returns number of bits shared between the Morton codes of i and j
 *
 * delta(i,j) is defined as the largest number of bits shared between Morton codes i and j. When the Morton codes are
 * sorted, this implies delta(i',j') >= delta(i,j) for any i',j' in [i,j]. If i and j lie outside
 * of the range of Morton codes corresponding to this tree, then it always returns -1. If the Morton codes for i and j
 * are identical, then the longest prefix of i and j is used as a tie breaker.
 */
__device__ inline int delta(const uint32_t *d_morton_codes,
                            unsigned int i,
                            unsigned int j,
                            int min_idx,
                            int max_idx)
    {
    // out-of-range j returns the sentinel -1
    // (callers pass idx-1: for idx = 0 the unsigned wraparound produces a huge j caught by j > max_idx)
    if (j > max_idx || j < min_idx)
        {
        return -1;
        }

    uint32_t first_code = d_morton_codes[i];
    uint32_t last_code = d_morton_codes[j];

    // if codes match, then use index as tie breaker
    // the number of shared bits is equal to the 32 bits in the integer, plus the number of bits shared between the
    // indexes (offset from the start of the node range to make things simpler)
    if (first_code == last_code)
        {
        return (32 + __clz((i-min_idx) ^ (j-min_idx)));
        }
    else
        {
        return __clz(first_code ^ last_code);
        }
    }

//! Determines the range of Morton codes that a node covers
/*!
 * \param d_morton_codes Array of Morton codes
 * \param min_idx The smallest Morton code index considered "in range" (inclusive)
 * \param max_idx The last Morton code index considered "in range" (inclusive)
 * \param idx Current node (Morton code) index
 *
 * \returns the minimum and maximum leafs covered by this node
 * \note This is a literal implementation of the Karras pseudocode, with no optimizations or refinement.
 *       Tero Karras, "Maximizing parallelism in the construction of BVHs, octrees, and k-d trees",
 *       High Performance Graphics (2012).
 */
__device__ inline uint2 determineRange(const uint32_t *d_morton_codes,
                                       const int min_idx,
                                       const int max_idx,
                                       const int idx)
    {
    int forward_prefix = delta(d_morton_codes, idx, idx+1, min_idx, max_idx);
    int backward_prefix = delta(d_morton_codes, idx, idx-1, min_idx, max_idx);

    // get direction of the range based on sign
    int d = ((forward_prefix - backward_prefix) > 0) ? 1 : -1;

    // get minimum prefix
    int min_prefix = delta(d_morton_codes, idx, idx-d, min_idx, max_idx);

    // get maximum prefix by binary search: grow the probe distance until the prefix drops
    int lmax = 2;
    while( delta(d_morton_codes, idx, idx + d*lmax, min_idx, max_idx) > min_prefix)
        {
        lmax = lmax << 1;
        }

    // refine the range length by halving the step each iteration
    unsigned int len = 0;
    unsigned int step = lmax;
    do
        {
        step = step >> 1;
        unsigned int new_len = len + step;
        if (delta(d_morton_codes, idx, idx + d*new_len, min_idx, max_idx) > min_prefix)
            len = new_len;
        }
    while (step > 1);

    // order range based on direction
    uint2 range;
    if (d > 0)
        {
        range.x = idx;
        range.y = idx + len;
        }
    else
        {
        range.x = idx - len;
        range.y = idx;
        }
    return range;
    }

//! Finds the split position in Morton codes covered by a range
/*!
 * \param d_morton_codes Array of Morton codes
 * \param first First leaf node in the range
 * \param last Last leaf node in the range
 *
 * \returns the leaf index corresponding to the split in Morton codes
 * See determineRange for original source of algorithm.
 */
__device__ inline unsigned int findSplit(const uint32_t *d_morton_codes,
                                         const unsigned int first,
                                         const unsigned int last)
    {
    uint32_t first_code = d_morton_codes[first];
    uint32_t last_code = d_morton_codes[last];

    // if codes match, then just split evenly
    if (first_code == last_code)
        return (first + last) >> 1;

    // get the length of the common prefix
    int common_prefix = __clz(first_code ^ last_code);

    // assume split starts at first, and begin binary search
    unsigned int split = first;
    unsigned int step = last - first;
    do
        {
        // exponential decrease (is factor of 2 best?)
        step = (step + 1) >> 1;
        unsigned int new_split = split + step;

        // if proposed split lies within range
        if (new_split < last)
            {
            unsigned int split_code = d_morton_codes[new_split];
            int split_prefix = __clz(first_code ^ split_code);

            // if new split shares a longer number of bits, accept it
            if (split_prefix > common_prefix)
                {
                split = new_split;
                }
            }
        }
    while (step > 1);

    return split;
    }

//! Kernel to generate the parent-child-sibling relationships between nodes
/*!
 * \param d_tree_parent_sib Parent and sibling for each node in the tree
 * \param d_morton_codes Morton codes for each leaf node
 * \param d_num_per_type Number of particles per type
 * \param ntypes Number of types
 * \param nleafs Number of leafs
 *
 * \b Implementation
 * One thread is called per internal node in a single kernel launch. Each thread first determines its "local" index
 * as an internal node within a tree based on the number of leafs per tree. The range of leafs covered by the internal
 * node is determined, and then its split position is identified. The split identifies the children of the node as
 * another internal node or as a leaf node.
 *
 * The parent and sibling of each child node is saved. The sibling id is bit shifted so as to use a single bit to encode
 * the sibling as a right child or left child (after shifting, we set the bit to 1 if the sibling is a right child).
 * If the child is a root node, it also saves information for itself (since no other node ever identifies a root as a
 * child node).
 */
__global__ void gpu_nlist_gen_hierarchy_kernel(uint2 *d_tree_parent_sib,
                                               const uint32_t *d_morton_codes,
                                               const unsigned int *d_num_per_type,
                                               const unsigned int ntypes,
                                               const unsigned int nleafs,
                                               const unsigned int ninternal)
    {
    // compute the internal node index this thread operates on
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per internal node
    if (idx >= ninternal)
        return;

    // get what type of leaf I am
    unsigned int min_idx = 0; // the "0" of the leaf node array
    unsigned int max_idx = 0; // the "N-1" of the leaf node array

    unsigned int node_idx = idx;
    unsigned int origin = 0;
    unsigned int end = 0;

    unsigned int cur_type=0;
    unsigned int active_types=0;
    for (cur_type=0; cur_type < ntypes; ++cur_type)
        {
        // current min index is the previous max index
        min_idx = max_idx;
        // max index adds the number of internal nodes in this type (nleaf - 1)
        const unsigned int cur_nleaf = (d_num_per_type[cur_type] + NLIST_GPU_PARTICLES_PER_LEAF - 1)/NLIST_GPU_PARTICLES_PER_LEAF;
        if (cur_nleaf > 0)
            {
            max_idx += cur_nleaf-1;
            ++active_types;
            }

        // we break the loop if we are in range
        if (idx < max_idx)
            {
            // decrement by 1 to get this back into the number we really need
            --active_types;

            // now, we repurpose the min and max index to now correspond to the *leaf* index.
            // the min index is the minimum *leaf* index
            origin = min_idx + active_types;
            end = max_idx + active_types;
            node_idx += active_types;
            break;
            }
        }

    // enact the magical split determining (Karras algorithm, see determineRange/findSplit above)
    uint2 range = determineRange(d_morton_codes, origin, end, node_idx);
    unsigned int first = range.x;
    unsigned int last = range.y;
    unsigned int split = findSplit(d_morton_codes, first, last);

    uint2 children;
    // set the children, shifting ahead by nleafs - cur_type to account for leaf shifting
    // this factor comes out from resetting 0 = N_leaf,i each time, and then remapping this to
    // an internal node
    children.x = (split == first) ? split : (nleafs - active_types + split);
    children.y = ((split + 1) == last) ? (split + 1) : nleafs - active_types + split + 1;

    uint2 parent_sib;
    parent_sib.x = nleafs + idx;

    // encode the sibling as the right child
    parent_sib.y = children.y << 1;
    parent_sib.y |= 1;

    d_tree_parent_sib[children.x] = parent_sib;

    // encode the sibling as the left child
    parent_sib.y = children.x << 1;
    d_tree_parent_sib[children.y] = parent_sib;

    // root is always number "zero", but only it can set its parent / sibling
    // we mark both of these as the root for traversing, since only the root node
    // will be its own sibling
    if (node_idx == origin)
        {
        parent_sib.x = nleafs + idx;
        parent_sib.y = (nleafs + idx) << 1;

        d_tree_parent_sib[nleafs + idx] = parent_sib;
        }
    }

/*!
 * \param d_tree_parent_sib Parent and sibling for each node in the tree
 * \param d_morton_codes Morton codes for each leaf node
 * \param d_num_per_type Number of particles per type
 * \param ntypes Number of types
 * \param nleafs Number of leafs
 * \param block_size Requested thread block size
 *
 * \returns cudaSuccess on completion
 */
cudaError_t gpu_nlist_gen_hierarchy(uint2 *d_tree_parent_sib,
                                    const uint32_t *d_morton_codes,
                                    const unsigned int *d_num_per_type,
                                    const unsigned int ntypes,
                                    const unsigned int nleafs,
                                    const unsigned int ninternal,
                                    const unsigned int block_size)
    {
    // query the kernel's maximum supported block size only once and cache it
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_gen_hierarchy_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }
    int run_block_size = min(block_size,max_block_size);

    // one thread per internal node
    gpu_nlist_gen_hierarchy_kernel<<<ninternal/run_block_size + 1, run_block_size>>>(d_tree_parent_sib,
                                                                                     d_morton_codes,
                                                                                     d_num_per_type,
                                                                                     ntypes,
                                                                                     nleafs,
                                                                                     ninternal);
    return cudaSuccess;
    }

//! Kernel to bubble up enclosing AABBs to internal nodes from leaf nodes
/*!
 * \param d_node_locks Atomic flags identifying when node has been visited
 * \param d_tree_aabbs AABB array for all tree nodes
 * \param d_tree_parent_sib Parent and sibling indexes of each node
 * \param ntypes Number of particle types
 * \param nleafs Number of leaf nodes
 *
 * \b Implementation
 * One thread is called per leaf node. The second thread to reach an internal node processes its two children,
 * which guarantees that no node AABB is prematurely processed. The arrival order at a node is controlled by an atomic
 * thread lock in global memory. This locking could be accelerated by using shared memory whenever a node is being
 * processed by threads in the same block.
 *
 * When processing the node, the thread also walks up the tree to find the "rope" that tells a traverser
 * how to navigate the tree. If a query AABB intersects the current node, then the traverser always moves the the left
 * child of the current node. If the AABB does not intersect, it moves along the "rope" to the next portion of the tree.
 * The "rope" is calculated by walking back up the tree to find the earliest ancestor that is a left child of its
 * parent. The rope then goes to that ancestor's sibling. If the root node is reached, then the rope is set to -1 to
 * indicate traversal should be aborted.
 *
 * This kernel also encodes the left child of a node into the AABB for internal nodes. The thread processing the node
 * checks if it arrived from a left child or right child of the node it is processing, and sets the left child of that
 * parent accordingly. A child is indicated by bit shifting, and setting the first bit to 1.
 */
__global__ void gpu_nlist_bubble_aabbs_kernel(unsigned int *d_node_locks,
                                              Scalar4 *d_tree_aabbs,
                                              const uint2 *d_tree_parent_sib,
                                              const unsigned int ntypes,
                                              const unsigned int nleafs)
    {
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // one thread per leaf
    if (idx >= nleafs)
        return;

    // okay, first we start from the leaf and set my bounding box
    Scalar4 cur_upper = d_tree_aabbs[2*idx];
    Scalar4 cur_lower = d_tree_aabbs[2*idx+1];

    // zero the counters for internal nodes
    cur_upper.w = 0.0f;
    cur_lower.w = 0.0f;

    unsigned int cur_node = idx;
    unsigned int lock_key = 0;
    do
        {
        uint2 cur_parent_sib = d_tree_parent_sib[cur_node];
        unsigned int cur_parent = cur_parent_sib.x;

        // if the current sibling is a right child, then the current node is a left child
        bool cur_is_left = (cur_parent_sib.y & 1);

        unsigned int cur_sibling = cur_parent_sib.y >> 1;

        // first we compute the skip for this node always
        // back track up the tree until you find a left child
        // we have a check in place so that we don't stall on the root node
        uint2 backtrack = cur_parent_sib;
        while (!(backtrack.y & 1) && backtrack.x != (backtrack.y >> 1))
            {
            backtrack = d_tree_parent_sib[backtrack.x];
            }

        // then, the skip is to the sibling of that node, or else to quit
        if (backtrack.y & 1)
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(backtrack.y >> 1);
            }
        else
            {
            d_tree_aabbs[2*cur_node].w = __int_as_scalar(-1);
            }

        // then, we do an atomicAdd on the lock to see if we need to process the parent AABBs
        // check to make sure the parent is bigger than nleafs, or else the node lock always fails
        // so that we terminate the thread
        lock_key = (cur_parent >= nleafs) ? atomicAdd(d_node_locks + cur_parent - nleafs, 1) : 0;

        // process the node (only the second thread to arrive sees lock_key == 1)
        if (lock_key == 1)
            {
            // compute the max upper bound
            Scalar4 sib_upper = d_tree_aabbs[2*cur_sibling];
            if (sib_upper.x > cur_upper.x) cur_upper.x = sib_upper.x;
            if (sib_upper.y > cur_upper.y) cur_upper.y = sib_upper.y;
            if (sib_upper.z > cur_upper.z) cur_upper.z = sib_upper.z;
            d_tree_aabbs[2*cur_parent] = cur_upper;

            // compute the min lower bound
            Scalar4 sib_lower = d_tree_aabbs[2*cur_sibling+1];
            if (sib_lower.x < cur_lower.x) cur_lower.x = sib_lower.x;
            if (sib_lower.y < cur_lower.y) cur_lower.y = sib_lower.y;
            if (sib_lower.z < cur_lower.z) cur_lower.z = sib_lower.z;

            // this must always be some internal node, so stash the left child of this node here
            unsigned int left_child_masked = ((cur_is_left ? cur_node : cur_sibling) << 1) | 1;
            cur_lower.w = __int_as_scalar( left_child_masked );

            d_tree_aabbs[2*cur_parent+1] = cur_lower;

            // bump the current node one level
            cur_node = cur_parent;
            }
        }
    while (lock_key == 1);
    }

/*!
 * \param d_node_locks Atomic flags identifying when node has been visited
 * \param d_tree_aabbs AABB array for all tree nodes
 * \param d_tree_parent_sib Parent and sibling indexes of each node
 * \param ntypes Number of particle types
 * \param nleafs Number of leaf nodes
 * \param block_size Requested thread block size
 *
 * \returns cudaSuccess on completion
 */
cudaError_t gpu_nlist_bubble_aabbs(unsigned int *d_node_locks,
                                   Scalar4 *d_tree_aabbs,
                                   const uint2 *d_tree_parent_sib,
                                   const unsigned int ntypes,
                                   const unsigned int nleafs,
                                   const unsigned int ninternal,
                                   const unsigned int block_size)
    {
    // reset the locks so every internal node is processed exactly once per call
    cudaMemset(d_node_locks, 0, sizeof(unsigned int)*ninternal);

    // NOTE(review): unlike the other wrappers in this file, block_size is not clamped against the
    // kernel's maxThreadsPerBlock here — confirm callers always pass a supported block size
    gpu_nlist_bubble_aabbs_kernel<<<nleafs/block_size + 1, block_size>>>(d_node_locks,
                                                                         d_tree_aabbs,
                                                                         d_tree_parent_sib,
                                                                         ntypes,
                                                                         nleafs);

    return cudaSuccess;
    }

//! Kernel to rearrange particle data into leaf order for faster traversal
/*!
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order * \param d_leaf_db Particle diameter and body id in leaf order * \param d_pos Particle positions * \param d_diameter Particle diameters * \param d_body Particle body ids * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id * \param Ntot Number of particles owned by this rank * * \b Implementation * One thread per particle is called. Writes are coalesced by writing in leaf order, and reading in a scattered way. */ __global__ void gpu_nlist_move_particles_kernel(Scalar4 *d_leaf_xyzf, Scalar2 *d_leaf_db, const Scalar4 *d_pos, const Scalar *d_diameter, const unsigned int *d_body, const unsigned int *d_map_tree_pid, const unsigned int Ntot) { // get thread index const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per particle if (idx >= Ntot) return; // read and write particle data unsigned int p_idx = d_map_tree_pid[idx]; Scalar4 pos_i = d_pos[p_idx]; d_leaf_xyzf[idx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __int_as_scalar(p_idx)); Scalar2 db = make_scalar2(d_diameter[p_idx], __int_as_scalar(d_body[p_idx])); d_leaf_db[idx] = db; } /*! 
* \param d_leaf_xyzf Particle xyz coordinates + particle id in leaf order * \param d_leaf_db Particle diameter and body id in leaf order * \param d_pos Particle positions * \param d_diameter Particle diameters * \param d_body Particle body ids * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id * \param Ntot Number of particles owned by this rank * \param block_size Requested thread block size * * \returns cudaSuccess on completion */ cudaError_t gpu_nlist_move_particles(Scalar4 *d_leaf_xyzf, Scalar2 *d_leaf_db, const Scalar4 *d_pos, const Scalar *d_diameter, const unsigned int *d_body, const unsigned int *d_map_tree_pid, const unsigned int Ntot, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_move_particles_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); gpu_nlist_move_particles_kernel<<<Ntot/run_block_size + 1, run_block_size>>>(d_leaf_xyzf, d_leaf_db, d_pos, d_diameter, d_body, d_map_tree_pid, Ntot); return cudaSuccess; } //! Kernel for traversing tree to generate neighbor list /*! 
* \param d_nlist Neighbor list for writing * \param d_n_neigh Number of neighbors per particle * \param d_last_updated_pos Records current particle positions * \param d_conditions Store overflow condition by type * \param d_Nmax Maximum number of neighbors allocated by type * \param d_head_list Indexes for writing into neighbor list * \param N Number of particles * \param nghosts Number of ghost particles * \param d_map_tree_pid Map leaf index to local particle index * \param d_leaf_offset Offset for reading leaf particles by type * \param d_tree_roots Index for tree root by type * \param d_tree_aabbs Tree AABBs * \param nleafs Total number of leafs * \param d_leaf_xyzf Leaf position-id array * \param d_leaf_db Leaf diameter-body array * \param d_pos Particle positions * \param d_image_list Translation vectors to check for traversal * \param nimages Number of translation vectors to check * \param d_r_cut Cutoff radius by type r_cut(i,j) * \param r_buff Buffer around cutoff radius * \param max_diam Maximum diameter attained by a particle for diameter shifting * \param ntypes Number of particle types * * \b Implementation * One thread is launched per particle, but the threads operate on particles in leaf order rather than ParticleData * order in order to minimize divergence within a warp (particles in the same leaf should intersect similar parts of the * tree). Each thread iterates on the particle types (trees) and queries on all translation vectors using a stackless * search. When the query AABB intersects a node AABB, the node AABB is checked to be an internal node or a leaf node. * If an internal node, then the traversal advances to that node's left child. If a leaf node, the leaf particles are * tested directly to be included in the neighbor list. The node then advances along that leaf node's rope. If the AABB * is not intersected, the traversal advances along the rope. This process proceeds until a rope signals that the * traversal is complete. 
*/ template<unsigned char flags> __global__ void gpu_nlist_traverse_tree_kernel(unsigned int *d_nlist, unsigned int *d_n_neigh, Scalar4 *d_last_updated_pos, unsigned int *d_conditions, const unsigned int *d_Nmax, const unsigned int *d_head_list, const unsigned int N, const unsigned int nghosts, const unsigned int *d_map_tree_pid, const unsigned int *d_leaf_offset, const unsigned int *d_tree_roots, const Scalar4 *d_tree_aabbs, const unsigned int nleafs, const Scalar4 *d_leaf_xyzf, const Scalar2 *d_leaf_db, const Scalar4 *d_pos, const Scalar3 *d_image_list, const unsigned int nimages, const Scalar *d_r_cut, const Scalar r_buff, const Scalar max_diam, const unsigned int ntypes) { bool filter_body = flags & 1; bool diameter_shift = flags & 2; // cache the r_listsq parameters into shared memory const Index2D typpair_idx(ntypes); const unsigned int num_typ_parameters = typpair_idx.getNumElements(); // shared data for per type pair parameters extern __shared__ unsigned char s_data[]; // pointer for the r_listsq data Scalar *s_r_list = (Scalar *)(&s_data[0]); unsigned int *s_Nmax = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters]); unsigned int *s_leaf_offset = (unsigned int *)(&s_data[sizeof(Scalar)*num_typ_parameters + sizeof(unsigned int)*ntypes]); // load in the per type pair r_list for (unsigned int cur_offset = 0; cur_offset < num_typ_parameters; cur_offset += blockDim.x) { if (cur_offset + threadIdx.x < num_typ_parameters) { Scalar r_cut = d_r_cut[cur_offset + threadIdx.x]; // force the r_list(i,j) to a skippable value if r_cut(i,j) is skippable s_r_list[cur_offset + threadIdx.x] = (r_cut > Scalar(0.0)) ? 
r_cut+r_buff : Scalar(-1.0); } if (cur_offset + threadIdx.x < ntypes) { s_Nmax[cur_offset + threadIdx.x] = d_Nmax[cur_offset + threadIdx.x]; s_leaf_offset[cur_offset + threadIdx.x] = d_leaf_offset[cur_offset + threadIdx.x]; } } __syncthreads(); // compute the particle index this thread operates on const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // quit now if this thread is processing past the end of the leaf list if (idx >= (N+nghosts)) return; // read in the current position unsigned int my_pidx = d_map_tree_pid[idx]; // we only process particles owned by this processor for neighbors if (my_pidx >= N) return; const Scalar4 postype_i = texFetchScalar4(d_pos, pdata_pos_tex, my_pidx); const Scalar3 pos_i = make_scalar3(postype_i.x, postype_i.y, postype_i.z); const unsigned int type_i = __scalar_as_int(postype_i.w); // fetch the diameter and body out of the leaf texture since it's bound anyway const Scalar2 db_i = texFetchScalar2(d_leaf_db, leaf_db_tex, idx); const Scalar diam_i = db_i.x; const unsigned int body_i = __scalar_as_int(db_i.y); const unsigned int nlist_head_i = texFetchUint(d_head_list, head_list_tex, my_pidx); unsigned int n_neigh_i = 0; for (unsigned int cur_pair_type=0; cur_pair_type < ntypes; ++cur_pair_type) { // Check primary box const Scalar r_cut_i = s_r_list[typpair_idx(type_i,cur_pair_type)]; // Skip this tree type if it is not needed if (r_cut_i <= Scalar(0.0)) continue; // stash the r_cutsq before any diameter shifting const Scalar r_cutsq_i = r_cut_i*r_cut_i; // the rlist to use for the AABB search has to be at least as big as the biggest diameter Scalar r_list_i = r_cut_i; if (diameter_shift) r_list_i += max_diam - Scalar(1.0); const unsigned int cur_tree_root = d_tree_roots[cur_pair_type]; // skip this type if we don't have it if (cur_tree_root == NLIST_GPU_INVALID_NODE) continue; for (unsigned int cur_image = 0; cur_image < nimages; ++cur_image) { const Scalar3 pos_i_image = pos_i + d_image_list[cur_image]; const Scalar3 
aabb_upper = make_scalar3(pos_i_image.x + r_list_i, pos_i_image.y + r_list_i, pos_i_image.z + r_list_i); const Scalar3 aabb_lower = make_scalar3(pos_i_image.x - r_list_i, pos_i_image.y - r_list_i, pos_i_image.z - r_list_i); // stackless search int cur_node_idx = cur_tree_root; while (cur_node_idx > -1) { const Scalar4 upper_rope = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx); const Scalar4 lower_np = texFetchScalar4(d_tree_aabbs, aabb_node_bounds_tex, 2*cur_node_idx+1); if (!(aabb_upper.x < lower_np.x || aabb_lower.x > upper_rope.x || aabb_upper.y < lower_np.y || aabb_lower.y > upper_rope.y || aabb_upper.z < lower_np.z || aabb_lower.z > upper_rope.z)) { const unsigned int np_child_masked = __scalar_as_int(lower_np.w); if(!(np_child_masked & 1)) { // leaf node // all leaves must have at least 1 particle, so we can use this to decide const unsigned int node_head = NLIST_GPU_PARTICLES_PER_LEAF*cur_node_idx - s_leaf_offset[cur_pair_type]; const unsigned int n_part = np_child_masked >> 1; for (unsigned int cur_p = node_head; cur_p < node_head + n_part; ++cur_p) { // neighbor j const Scalar4 cur_xyzf = texFetchScalar4(d_leaf_xyzf, leaf_xyzf_tex, cur_p); const Scalar3 pos_j = make_scalar3(cur_xyzf.x, cur_xyzf.y, cur_xyzf.z); const unsigned int j = __scalar_as_int(cur_xyzf.w); const Scalar2 cur_db = texFetchScalar2(d_leaf_db, leaf_db_tex, cur_p); const Scalar diam_j = cur_db.x; const unsigned int body_j = __scalar_as_int(cur_db.y); bool excluded = (my_pidx == j); if (filter_body && body_i != 0xffffffff) excluded = excluded | (body_i == body_j); if (!excluded) { // now we can trim down the actual particles based on diameter // compute the shift for the cutoff if not excluded Scalar sqshift = Scalar(0.0); if (diameter_shift) { const Scalar delta = (diam_i + diam_j) * Scalar(0.5) - Scalar(1.0); // r^2 < (r_list + delta)^2 // r^2 < r_listsq + delta^2 + 2*r_list*delta sqshift = (delta + Scalar(2.0) * r_cut_i) * delta; } // compute distance and wrap back 
into box Scalar3 drij = pos_j - pos_i_image; Scalar dr2 = dot(drij,drij); if (dr2 <= (r_cutsq_i + sqshift)) { if (n_neigh_i < s_Nmax[type_i]) { d_nlist[nlist_head_i + n_neigh_i] = j; } ++n_neigh_i; } } } // leaf nodes always move to their rope cur_node_idx = __scalar_as_int(upper_rope.w); } else { // internal node, take left child cur_node_idx = (np_child_masked >> 1); } } else { cur_node_idx = __scalar_as_int(upper_rope.w); // no overlap, rope ahead } } // end stackless search } // end loop over images } // end loop over pair types // could try reordering by idx instead of pidx, but that seems to not make much difference in microbenchmarking. d_n_neigh[my_pidx] = n_neigh_i; d_last_updated_pos[my_pidx] = make_scalar4(pos_i.x, pos_i.y, pos_i.z, __scalar_as_int(type_i)); // update the number of neighbors for this type if allocated memory is exceeded if (n_neigh_i >= s_Nmax[type_i]) atomicMax(&d_conditions[type_i], n_neigh_i); } /*! * \param d_nlist Neighbor list for writing * \param d_n_neigh Number of neighbors per particle * \param d_last_updated_pos Records current particle positions * \param d_conditions Store overflow condition by type * \param d_Nmax Maximum number of neighbors allocated by type * \param d_head_list Indexes for writing into neighbor list * \param N Number of particles * \param nghosts Number of ghost particles * \param d_map_tree_pid Map leaf index to local particle index * \param d_leaf_offset Offset for reading leaf particles by type * \param d_tree_roots Index for tree root by type * \param d_tree_aabbs Tree AABBs * \param nleafs Total number of leafs * \param d_leaf_xyzf Leaf position-id array * \param d_leaf_db Leaf diameter-body array * \param d_pos Particle positions * \param d_image_list Translation vectors to check for traversal * \param nimages Number of translation vectors to check * \param d_r_cut Cutoff radius by type r_cut(i,j) * \param r_buff Buffer around cutoff radius * \param max_diam Maximum diameter attained by a particle 
for diameter shifting * \param ntypes Number of particle types * \param filter_body True if body filtering is enabled * \param diameter_shift True if rcut(i,j) should be shifted by the particle diameters * \param compute_capability Compute capability of the GPU (in 20, 30, 35 format) * \param block_size Requested thread block size * * \returns cudaSuccess on completion * \returns cudaError on failure to texture bind * * \note Kernel calls are templated on body filtering and diameter shifting for optimization. * \note One thread is called for all leaf particles. Some of these threads will die because they correspond to ghost * particles not owned by the rank. Because the leaf particles are sorted, there is no easy way to skip these * particles, and this inefficiency is assumed to be relatively small. */ cudaError_t gpu_nlist_traverse_tree(unsigned int *d_nlist, unsigned int *d_n_neigh, Scalar4 *d_last_updated_pos, unsigned int *d_conditions, const unsigned int *d_Nmax, const unsigned int *d_head_list, const unsigned int N, const unsigned int nghosts, const unsigned int *d_map_tree_pid, const unsigned int *d_leaf_offset, const unsigned int *d_tree_roots, const Scalar4 *d_tree_aabbs, const unsigned int nleafs, const unsigned int ninternal, const unsigned int nnodes, const Scalar4 *d_leaf_xyzf, const Scalar2 *d_leaf_db, const Scalar4 *d_pos, const Scalar3 *d_image_list, const unsigned int nimages, const Scalar *d_r_cut, const Scalar r_buff, const Scalar max_diam, const unsigned int ntypes, bool filter_body, bool diameter_shift, const unsigned int compute_capability, const unsigned int block_size) { // shared memory = r_list + Nmax Index2D typpair_idx(ntypes); unsigned int shared_size = sizeof(Scalar)*typpair_idx.getNumElements() + 2*sizeof(unsigned int)*ntypes; // bind the neighborlist texture if (compute_capability < 35) { pdata_pos_tex.normalized = false; pdata_pos_tex.filterMode = cudaFilterModePoint; cudaError_t error = cudaBindTexture(0, pdata_pos_tex, d_pos, 
sizeof(Scalar4)*(N+nghosts)); if (error != cudaSuccess) return error; leaf_xyzf_tex.normalized = false; leaf_xyzf_tex.filterMode = cudaFilterModePoint; error = cudaBindTexture(0, leaf_xyzf_tex, d_leaf_xyzf, sizeof(Scalar4)*(N+nghosts)); if (error != cudaSuccess) return error; leaf_db_tex.normalized = false; leaf_db_tex.filterMode = cudaFilterModePoint; error = cudaBindTexture(0, leaf_db_tex, d_leaf_db, sizeof(Scalar2)*(N+nghosts)); if (error != cudaSuccess) return error; aabb_node_bounds_tex.normalized = false; aabb_node_bounds_tex.filterMode = cudaFilterModePoint; error = cudaBindTexture(0, aabb_node_bounds_tex, d_tree_aabbs, sizeof(Scalar4)*2*nnodes); if (error != cudaSuccess) return error; head_list_tex.normalized = false; head_list_tex.filterMode = cudaFilterModePoint; error = cudaBindTexture(0, head_list_tex, d_head_list, sizeof(unsigned int)*N); if (error != cudaSuccess) return error; } if (!filter_body && !diameter_shift) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<0>); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); int nblocks = (N+nghosts)/run_block_size + 1; gpu_nlist_traverse_tree_kernel<0><<<nblocks, run_block_size, shared_size>>>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs, d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes); } else if (filter_body && !diameter_shift) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<1>); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); int nblocks = (N+nghosts)/run_block_size + 1; gpu_nlist_traverse_tree_kernel<1><<<nblocks, run_block_size, 
shared_size>>>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs, d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes); } else if (!filter_body && diameter_shift) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<2>); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); int nblocks = (N+nghosts)/run_block_size + 1; gpu_nlist_traverse_tree_kernel<2><<<nblocks, run_block_size, shared_size>>>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs, d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes); } else if (filter_body && diameter_shift) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, gpu_nlist_traverse_tree_kernel<3>); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); int nblocks = (N+nghosts)/run_block_size + 1; gpu_nlist_traverse_tree_kernel<3><<<nblocks, run_block_size, shared_size>>>(d_nlist, d_n_neigh, d_last_updated_pos, d_conditions, d_Nmax, d_head_list, N, nghosts, d_map_tree_pid, d_leaf_offset, d_tree_roots, d_tree_aabbs, nleafs, d_leaf_xyzf, d_leaf_db, d_pos, d_image_list, nimages, d_r_cut, r_buff, max_diam, ntypes); } // unbind the textures if (compute_capability < 35) { cudaError_t error = cudaUnbindTexture(pdata_pos_tex); if (error != cudaSuccess) return error; error = cudaUnbindTexture(leaf_xyzf_tex); if (error != cudaSuccess) return error; error = cudaUnbindTexture(leaf_db_tex); if (error != cudaSuccess) return error; error = cudaUnbindTexture(aabb_node_bounds_tex); if (error != 
cudaSuccess) return error; error = cudaUnbindTexture(head_list_tex); if (error != cudaSuccess) return error; } return cudaSuccess; } //! Kernel to find divisons between particle types in sorted order /*! * \param d_type_head Index to first type in leaf ordered particles by type * \param d_pos Particle positions * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id * \param N Total number of particles on rank (including ghosts) * * The starting index for each type of particles is the first particle where the left neighbor is not of the same type. */ __global__ void gpu_nlist_get_divisions_kernel(unsigned int *d_type_head, const Scalar4 *d_pos, const unsigned int *d_map_tree_pid, const unsigned int N) { // compute the particle index this thread operates on const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per particle if (idx >= N) return; const unsigned int cur_pidx = d_map_tree_pid[idx]; // get type of the current particle const Scalar4 cur_postype = d_pos[cur_pidx]; const unsigned int cur_type = __scalar_as_int(cur_postype.w); // all particles except for the first one should look left if (idx > 0) { const unsigned int left_pidx = d_map_tree_pid[idx - 1]; // get type of the particle to my left const Scalar4 left_postype = d_pos[left_pidx]; const unsigned int left_type = __scalar_as_int(left_postype.w); // if the left has a different type, then this is a type boundary, and the type starts at the current thread index if (left_type != cur_type) { d_type_head[cur_type] = idx + 1; // offset the index +1 so that we can use 0 to mean "none of this found" } } else // the first particle just sets its type to be 1 { d_type_head[cur_type] = 1; } } /*! 
* \param d_type_head Index to first type in leaf ordered particles by type * \param d_num_per_type Number of particles per type * \param d_leaf_offset Offset for reading particles out of leaf order * \param d_tree_roots Root node of each tree * \param d_pos Particles positions * \param d_map_tree_pid ParticleData indexes corresponding to a leaf particle id * \param N Total number of particles on rank (including ghosts) * \param ntypes Number of types * \param block_size Requested thread block size * * \returns cudaSuccess on completion */ cudaError_t gpu_nlist_init_count(unsigned int *d_type_head, const Scalar4 *d_pos, const unsigned int *d_map_tree_pid, const unsigned int N, const unsigned int ntypes, const unsigned int block_size) { // apply the scan static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_nlist_get_divisions_kernel); max_block_size = attr.maxThreadsPerBlock; } int run_block_size = min(block_size,max_block_size); // zero out the head list cudaMemset(d_type_head, 0, sizeof(unsigned int)*ntypes); // get the head list divisions gpu_nlist_get_divisions_kernel<<<N/run_block_size + 1, run_block_size>>>(d_type_head, d_pos, d_map_tree_pid, N); return cudaSuccess; } #undef MORTON_CODE_BITS #undef MORTON_TYPE_MASK_64 #undef MORTON_CODE_N_BINS
fccf94e3f1cff268cf6315b7a67e43fa6ec2cf76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) // Compute C = A * B __global__ void matrixMultiplyShared(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP const int TILE_WIDTH = 32; __shared__ float sharedA[TILE_WIDTH][TILE_WIDTH]; __shared__ float sharedB[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*TILE_WIDTH + ty; int Col = bx*TILE_WIDTH + tx; float Cvalue = 0.0; if (numAColumns != numBRows) return ; for (int i = 0; i < (int)(ceil((float)numAColumns/TILE_WIDTH)); i++) { if (i*TILE_WIDTH + tx < numAColumns && Row < numARows){ sharedA[ty][tx] = A[Row*numAColumns + i*TILE_WIDTH + tx]; }else{ sharedA[ty][tx] = 0.0; } if (i*TILE_WIDTH + ty < numBRows && Col < numBColumns){ sharedB[ty][tx] = B[(i*TILE_WIDTH + ty)*numBColumns + Col]; }else{ sharedB[ty][tx] = 0.0; } __syncthreads(); if(Row < numARows && Col < numBColumns){ for(int j = 0; j < TILE_WIDTH; j++) Cvalue += sharedA[ty][j] * sharedB[j][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns + Col] = Cvalue; } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in 
the matrix C (you have to set this) int TILE_WIDTH = 32; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = 0; numCColumns = 0; numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*) malloc(sizeof(float)*numCRows*numCColumns); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void**)&deviceA , sizeof(float)*numARows*numAColumns ); hipMalloc((void**)&deviceB , sizeof(float)*numBRows*numBColumns); hipMalloc((void**)&deviceC , sizeof(float)*numCRows*numCColumns); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int dimX = (int)(ceil((float)numCColumns / TILE_WIDTH)); int dimY = (int)(ceil((float)numCRows / TILE_WIDTH)); dim3 DimGrid(dimX, dimY); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiplyShared), dim3(DimGrid) , dim3(DimBlock), 0, 0, deviceA , deviceB , deviceC , numARows , numAColumns, numBRows ,numBColumns , numCRows , numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the 
CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns , hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
fccf94e3f1cff268cf6315b7a67e43fa6ec2cf76.cu
#include <wb.h> #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) // Compute C = A * B __global__ void matrixMultiplyShared(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP const int TILE_WIDTH = 32; __shared__ float sharedA[TILE_WIDTH][TILE_WIDTH]; __shared__ float sharedB[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*TILE_WIDTH + ty; int Col = bx*TILE_WIDTH + tx; float Cvalue = 0.0; if (numAColumns != numBRows) return ; for (int i = 0; i < (int)(ceil((float)numAColumns/TILE_WIDTH)); i++) { if (i*TILE_WIDTH + tx < numAColumns && Row < numARows){ sharedA[ty][tx] = A[Row*numAColumns + i*TILE_WIDTH + tx]; }else{ sharedA[ty][tx] = 0.0; } if (i*TILE_WIDTH + ty < numBRows && Col < numBColumns){ sharedB[ty][tx] = B[(i*TILE_WIDTH + ty)*numBColumns + Col]; }else{ sharedB[ty][tx] = 0.0; } __syncthreads(); if(Row < numARows && Col < numBColumns){ for(int j = 0; j < TILE_WIDTH; j++) Cvalue += sharedA[ty][j] * sharedB[j][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns + Col] = Cvalue; } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) int TILE_WIDTH = 32; args = wbArg_read(argc, argv); 
wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = 0; numCColumns = 0; numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*) malloc(sizeof(float)*numCRows*numCColumns); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void**)&deviceA , sizeof(float)*numARows*numAColumns ); cudaMalloc((void**)&deviceB , sizeof(float)*numBRows*numBColumns); cudaMalloc((void**)&deviceC , sizeof(float)*numCRows*numCColumns); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int dimX = (int)(ceil((float)numCColumns / TILE_WIDTH)); int dimY = (int)(ceil((float)numCRows / TILE_WIDTH)); dim3 DimGrid(dimX, dimY); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<DimGrid , DimBlock>>>(deviceA , deviceB , deviceC , numARows , numAColumns, numBRows ,numBColumns , numCRows , numCColumns); cudaThreadSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns , 
cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
c393a563252944c459e16af001cf90e776583e46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // process BICG_BATCH elements in thread #define BICG_BATCH 8 #define BICG_STEP 32/BICG_BATCH typedef float DATA_TYPE; extern "C" __global__ void bicgKernel1( DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q, int m, int n) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < n) { q[i] = 0.0; int j; for (j = 0; j < m; j++) { q[i] += A[i * m + j] * p[j]; } } } extern "C" __global__ void bicgKernel2( DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, int m, int n) { int j = blockDim.x*blockIdx.x + threadIdx.x; if (j < m) { s[j] = 0.0; int i; for (i = 0; i < n; i++) { s[j] += A[i * m + j] * r[i]; } } } extern "C" __global__ void bicgFusedRef( float *A, float *x1, float *y1, float *x2, float *y2, int m, int n) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; __shared__ float s_A[32][33]; __shared__ float s_x1[32]; __shared__ float s_x2[32]; float l_sum = 0.0f; // load x2 if (ty == 0) s_x2[tx] = x2[bx * 32 + tx]; for (int i = m*by; i < m*(by + 1); i += 32) { // load x1 if (ty == 1) s_x1[tx] = x1[i + tx]; __syncthreads(); for (int j = 0; j < 32; j += BICG_STEP) { s_A[ty + j][tx] = A[(i + ty + j)*n + bx * 32 + tx]; l_sum += s_A[ty + j][tx] * s_x1[ty + j]; } __syncthreads(); float tmp = 0.0f; for (int j = 0; j < 32; j += BICG_STEP) tmp += s_A[tx][ty + j] * s_x2[ty + j]; s_A[tx][ty] = tmp; __syncthreads(); if (ty < 2) s_A[tx][ty] = tmp = tmp + s_A[tx][ty + 2]; __syncthreads(); if (ty == 0) { atomicAdd(y2 + i + tx, tmp + s_A[tx][1]); } } // compute total sum __syncthreads(); s_A[ty][tx] = l_sum; __syncthreads(); if (ty < 2) { s_A[ty][tx] = l_sum = l_sum + s_A[ty + 2][tx]; } __syncthreads(); if (ty == 0) { atomicAdd(y1 + bx * 32 + tx, l_sum + s_A[1][tx]); } }
c393a563252944c459e16af001cf90e776583e46.cu
// process BICG_BATCH elements in thread #define BICG_BATCH 8 #define BICG_STEP 32/BICG_BATCH typedef float DATA_TYPE; extern "C" __global__ void bicgKernel1( DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q, int m, int n) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < n) { q[i] = 0.0; int j; for (j = 0; j < m; j++) { q[i] += A[i * m + j] * p[j]; } } } extern "C" __global__ void bicgKernel2( DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, int m, int n) { int j = blockDim.x*blockIdx.x + threadIdx.x; if (j < m) { s[j] = 0.0; int i; for (i = 0; i < n; i++) { s[j] += A[i * m + j] * r[i]; } } } extern "C" __global__ void bicgFusedRef( float *A, float *x1, float *y1, float *x2, float *y2, int m, int n) { int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; __shared__ float s_A[32][33]; __shared__ float s_x1[32]; __shared__ float s_x2[32]; float l_sum = 0.0f; // load x2 if (ty == 0) s_x2[tx] = x2[bx * 32 + tx]; for (int i = m*by; i < m*(by + 1); i += 32) { // load x1 if (ty == 1) s_x1[tx] = x1[i + tx]; __syncthreads(); for (int j = 0; j < 32; j += BICG_STEP) { s_A[ty + j][tx] = A[(i + ty + j)*n + bx * 32 + tx]; l_sum += s_A[ty + j][tx] * s_x1[ty + j]; } __syncthreads(); float tmp = 0.0f; for (int j = 0; j < 32; j += BICG_STEP) tmp += s_A[tx][ty + j] * s_x2[ty + j]; s_A[tx][ty] = tmp; __syncthreads(); if (ty < 2) s_A[tx][ty] = tmp = tmp + s_A[tx][ty + 2]; __syncthreads(); if (ty == 0) { atomicAdd(y2 + i + tx, tmp + s_A[tx][1]); } } // compute total sum __syncthreads(); s_A[ty][tx] = l_sum; __syncthreads(); if (ty < 2) { s_A[ty][tx] = l_sum = l_sum + s_A[ty + 2][tx]; } __syncthreads(); if (ty == 0) { atomicAdd(y1 + bx * 32 + tx, l_sum + s_A[1][tx]); } }
3f3cd2ea93828e31557772f94ac0a4ee67a84862.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> __global__ void checkIndex(void) { printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d) " "gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(void) { int nElem = 6; dim3 block(3); dim3 grid((nElem + block.x - 1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block), 0, 0, ); hipDeviceReset(); return 0; }
3f3cd2ea93828e31557772f94ac0a4ee67a84862.cu
#include <cuda_runtime.h> #include <stdio.h> __global__ void checkIndex(void) { printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d) " "gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(void) { int nElem = 6; dim3 block(3); dim3 grid((nElem + block.x - 1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // check grid and block dimension from device side checkIndex <<<grid, block>>>(); cudaDeviceReset(); return 0; }
3d78e62b1e87763dd3eca4bb2986c4d2c06cfa3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> //#include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void norm_elements(float* in, float* out, unsigned int N) { __shared__ float vOut[16]; int idx = blockIdx.x*blockDim.x+threadIdx.x; if ( idx < N)vOut[idx] = in[idx]*in[idx]; __syncthreads(); if(idx == 0) { out[0] = 0; int i; for ( i = 0; i < N; i++ ) { out[0] += vOut[i]; } } __syncthreads(); if(idx == 0) { out[0] = sqrt(out[0]); } } void square_host(double* pIn, double *pOut, int sizeIn, int sizeOut) { int i, j; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; int sizeBlock; sizeBlock = 16; data1 = pIn; data2 = pOut; /* Find the dimensions of the data */ /* Create an mxArray for the output data */ /* Create an input and output data array on the GPU*/ hipMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn); hipMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut); /* Retrieve the input data */ /* Check if the input array is single or double precision */ /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) malloc(sizeof(float)*sizeIn); for (j = 0; j < sizeIn; j++) { data1f[j] = (float) data1[j]; } for (i = 0; i < sizeIn; i++) { printf("data1f[%d] = %f, ", i, data1f[i]); } printf("\n"); hipMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, hipMemcpyHostToDevice); data2f = (float *) malloc(sizeof(float)*sizeOut); //hipMemcpy( data2f_gpu, data2f, sizeof(float)*sizeOut, hipMemcpyHostToDevice); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(sizeBlock); dim3 dimGrid((sizeIn)/dimBlock.x); if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1; /* Call function on GPU */hipLaunchKernelGGL(( norm_elements), dim3(dimGrid),dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, sizeIn); hipError_t e; e = hipGetLastError(); if ( e != hipSuccess) { fprintf(stderr, "CUDA Error 
on square_elements: '%s' \n", hipGetErrorString(e)); exit(-1); } /* Copy result back to host */ hipMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, hipMemcpyDeviceToHost); for (i = 0; i < sizeOut; i++) { printf("data2f[%d] = %f, ", i, data2f[i]); } printf("\n"); /* Create a pointer to the output data */ /* Convert from single to double before returning */ for (j = 0; j < sizeOut; j++) { data2[j] = (double) data2f[j]; } /* Clean-up memory on device and host */ free(data1f); free(data2f); hipFree(data1f_gpu); hipFree(data2f_gpu); } int main() { double *pIn, *pOut; int sizeIn, sizeOut; int i; sizeIn = 2; sizeOut = 1; pIn = (double*)malloc(sizeof(double)*sizeIn); pOut = (double*)malloc(sizeof(double)*sizeOut); pIn[0] = 3; pIn[1] = 4; //pIn[2] = 3; square_host(pIn, pOut, sizeIn, sizeOut); printf("output square result"); for (i = 0; i < sizeOut; i++) { printf(" pOut[%d] = %lf, ", i, pOut[i]); } printf("\n"); printf("output norm result"); for (i = 0; i < sizeOut; i++) { //pOut[i] = sqrt(pOut[i]); printf("squre of pOut[%d] = %lf, ", i, pOut[i]); } printf("\n"); free(pIn); free(pOut); return 0; } /* Gateway function */ /* void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); //Create an mxArray for the output data by buyu //plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL); //create a float host output variable for float //data2f = (float *) mxMalloc(sizeof(float)); for (i = 0; i < nrhs; i++) { // Find the dimensions of the data m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); //Create an mxArray for the output data //plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[i] = mxCreateDoubleMatrix(1, 1, mxREAL); // Create an input and output data array on the GPU hipMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); //hipMalloc( (void **) 
&data2f_gpu,sizeof(float)*m*n); hipMalloc( (void **) &data2f_gpu,sizeof(float)); // Retrieve the input data data1 = mxGetPr(prhs[i]); // Check if the input array is single or double precision category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { // The input array is single precision, it can be sent directly to the card hipMemcpy( data1f_gpu, data1, sizeof(float)*m*n, hipMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { // The input array is in double precision, it needs to be converted t floats before being sent to the card data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } printf("before copyHost to device \n"); hipMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, hipMemcpyHostToDevice); } //orginal output //data2f = (float *) mxMalloc(sizeof(float)*m*n); data2f = (float *) mxMalloc(sizeof(float)); // Compute execution configuration using 128 threads per block dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; printf("before calling GPU \n"); // Call function on GPU square_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, n*m); printf("before copy result back \n"); // Copy result back to host //hipMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, hipMemcpyDeviceToHost); hipMemcpy( data2f, data2f_gpu, sizeof(float), hipMemcpyDeviceToHost); // Create a pointer to the output data data2 = mxGetPr(plhs[i]); // Convert from single to double before returning //for (j = 0; j < m*n; j++) //{ //data2[j] = (double) data2f[j]; //} printf("before return result to matlab \n"); data2[0] = 0; data2[0] = (double) data2f[0]; // Clean-up memory on device and host mxFree(data1f); mxFree(data2f); hipFree(data1f_gpu); hipFree(data2f_gpu); }// for i } */
3d78e62b1e87763dd3eca4bb2986c4d2c06cfa3f.cu
#include "cuda.h" #include <stdio.h> //#include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void norm_elements(float* in, float* out, unsigned int N) { __shared__ float vOut[16]; int idx = blockIdx.x*blockDim.x+threadIdx.x; if ( idx < N)vOut[idx] = in[idx]*in[idx]; __syncthreads(); if(idx == 0) { out[0] = 0; int i; for ( i = 0; i < N; i++ ) { out[0] += vOut[i]; } } __syncthreads(); if(idx == 0) { out[0] = sqrt(out[0]); } } void square_host(double* pIn, double *pOut, int sizeIn, int sizeOut) { int i, j; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; int sizeBlock; sizeBlock = 16; data1 = pIn; data2 = pOut; /* Find the dimensions of the data */ /* Create an mxArray for the output data */ /* Create an input and output data array on the GPU*/ cudaMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn); cudaMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut); /* Retrieve the input data */ /* Check if the input array is single or double precision */ /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) malloc(sizeof(float)*sizeIn); for (j = 0; j < sizeIn; j++) { data1f[j] = (float) data1[j]; } for (i = 0; i < sizeIn; i++) { printf("data1f[%d] = %f, ", i, data1f[i]); } printf("\n"); cudaMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, cudaMemcpyHostToDevice); data2f = (float *) malloc(sizeof(float)*sizeOut); //cudaMemcpy( data2f_gpu, data2f, sizeof(float)*sizeOut, cudaMemcpyHostToDevice); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(sizeBlock); dim3 dimGrid((sizeIn)/dimBlock.x); if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1; /* Call function on GPU */ norm_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, sizeIn); cudaError_t e; e = cudaGetLastError(); if ( e != cudaSuccess) { fprintf(stderr, "CUDA Error on square_elements: '%s' \n", cudaGetErrorString(e)); exit(-1); } /* Copy result back to host */ 
cudaMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, cudaMemcpyDeviceToHost); for (i = 0; i < sizeOut; i++) { printf("data2f[%d] = %f, ", i, data2f[i]); } printf("\n"); /* Create a pointer to the output data */ /* Convert from single to double before returning */ for (j = 0; j < sizeOut; j++) { data2[j] = (double) data2f[j]; } /* Clean-up memory on device and host */ free(data1f); free(data2f); cudaFree(data1f_gpu); cudaFree(data2f_gpu); } int main() { double *pIn, *pOut; int sizeIn, sizeOut; int i; sizeIn = 2; sizeOut = 1; pIn = (double*)malloc(sizeof(double)*sizeIn); pOut = (double*)malloc(sizeof(double)*sizeOut); pIn[0] = 3; pIn[1] = 4; //pIn[2] = 3; square_host(pIn, pOut, sizeIn, sizeOut); printf("output square result"); for (i = 0; i < sizeOut; i++) { printf(" pOut[%d] = %lf, ", i, pOut[i]); } printf("\n"); printf("output norm result"); for (i = 0; i < sizeOut; i++) { //pOut[i] = sqrt(pOut[i]); printf("squre of pOut[%d] = %lf, ", i, pOut[i]); } printf("\n"); free(pIn); free(pOut); return 0; } /* Gateway function */ /* void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); //Create an mxArray for the output data by buyu //plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL); //create a float host output variable for float //data2f = (float *) mxMalloc(sizeof(float)); for (i = 0; i < nrhs; i++) { // Find the dimensions of the data m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); //Create an mxArray for the output data //plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[i] = mxCreateDoubleMatrix(1, 1, mxREAL); // Create an input and output data array on the GPU cudaMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); //cudaMalloc( (void **) &data2f_gpu,sizeof(float)*m*n); cudaMalloc( (void **) &data2f_gpu,sizeof(float)); // Retrieve the input data 
data1 = mxGetPr(prhs[i]); // Check if the input array is single or double precision category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { // The input array is single precision, it can be sent directly to the card cudaMemcpy( data1f_gpu, data1, sizeof(float)*m*n, cudaMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { // The input array is in double precision, it needs to be converted t floats before being sent to the card data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } printf("before copyHost to device \n"); cudaMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, cudaMemcpyHostToDevice); } //orginal output //data2f = (float *) mxMalloc(sizeof(float)*m*n); data2f = (float *) mxMalloc(sizeof(float)); // Compute execution configuration using 128 threads per block dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; printf("before calling GPU \n"); // Call function on GPU square_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, n*m); printf("before copy result back \n"); // Copy result back to host //cudaMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, cudaMemcpyDeviceToHost); cudaMemcpy( data2f, data2f_gpu, sizeof(float), cudaMemcpyDeviceToHost); // Create a pointer to the output data data2 = mxGetPr(plhs[i]); // Convert from single to double before returning //for (j = 0; j < m*n; j++) //{ //data2[j] = (double) data2f[j]; //} printf("before return result to matlab \n"); data2[0] = 0; data2[0] = (double) data2f[0]; // Clean-up memory on device and host mxFree(data1f); mxFree(data2f); cudaFree(data1f_gpu); cudaFree(data2f_gpu); }// for i } */
b88828aae5d9b4fbacb52d9369eaaa9b1857a5dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MIT License // Copyright (c) 2019 - Daniel Peter Playne // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// Headers #include <stdio.h> #include <sys/time.h> // Project Headers #include "utils.h" #include "cuda_utils.cuh" #include "reduction.cuh" // Device Functions __global__ void init_labels (unsigned int* g_labels, const unsigned char *g_image); __global__ void resolve_labels (unsigned int* g_labels); __global__ void label_equivalence(unsigned int *g_labels, const unsigned char *g_image, bool *changed); // Image Size (Device Constant) __constant__ unsigned int cX, cY, cZ, cXYZ; __constant__ unsigned int pX, pY; // Main Method int main(int argc,char **argv) { // Check Arguments if(argc < 3) { printf("Usage: ./label_equivalence_direct_3D_clamp <gpu> <file0> <file1> ...\n"); exit(1); } // Initialise device hipSetDevice(atoi(argv[1])); // CUDA Streams hipStream_t stream1, stream2; hipStreamCreate(&stream1); hipStreamCreate(&stream2); // For each input for(int f = 2; f < argc; f++) { // Read Data from file unsigned int X, Y, Z; unsigned char *h_image = readPG3D(argv[f], X, Y, Z); // Calculate Data Mean unsigned char image_mean = mean(h_image, X*Y*Z); // Convert Data to Binary threshold(h_image, image_mean, X*Y*Z); // Number of Voxels unsigned int XYZ = X*Y*Z; // Calculate Pitch unsigned int PX = X; unsigned int PY = X*Y; // Set block size dim3 block(32, 4, 4); dim3 grid(ceil(X/(float)block.x), ceil(Y/(float)block.y), ceil(Z/(float)block.z)); // Allocate host memory unsigned int *h_labels = new unsigned int[X*Y*Z]; unsigned int *d_labels; unsigned char *d_image; bool *d_changed; // Allocate device memory hipMalloc((void**) &d_labels, X*Y*Z*sizeof(unsigned int)); hipMalloc((void**) &d_image, X*Y*Z*sizeof(unsigned char)); hipMalloc((void**) &d_changed, sizeof(bool)); // Copy host to device memory hipMemcpyToSymbol(cX, &X, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(cY, &Y, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(cZ, &Z, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(cXYZ, &XYZ, sizeof(unsigned int), 0, 
hipMemcpyHostToDevice); hipMemcpyToSymbol(pX, &PX, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(pY, &PY, sizeof(unsigned int), 0, hipMemcpyHostToDevice); hipMemcpy(d_image, h_image, X*Y*Z*sizeof(unsigned char), hipMemcpyHostToDevice); // Timing const int N = 100; float *times = new float[N]; hipEvent_t time_event[2]; hipEventCreate(&time_event[0]); hipEventCreate(&time_event[1]); // Run N times for(int i = 0; i < N; i++) { // Record start time hipEventRecord(time_event[0]); // Initialise labels hipLaunchKernelGGL(( init_labels) , dim3(grid), dim3(block) , 0, 0, d_labels, d_image); // Resolve the labels hipLaunchKernelGGL(( resolve_labels) , dim3(grid), dim3(block) , 0, 0, d_labels); hipDeviceSynchronize(); // Changed Flag bool changed = true; // While labels have changed while(changed) { // Copy changed to device hipMemset(d_changed, 0, 1); // Label image hipLaunchKernelGGL(( label_equivalence) , dim3(grid), dim3(block), 0, stream1 , d_labels, d_image, d_changed); // Copy changed back hipMemcpyAsync(&changed, d_changed, sizeof(bool), hipMemcpyDeviceToHost, stream1); // Resolve the labels hipLaunchKernelGGL(( resolve_labels) , dim3(grid), dim3(block), 0, stream2 , d_labels); hipDeviceSynchronize(); } // Record end event hipEventRecord(time_event[1]); hipDeviceSynchronize(); // Calculate Elapsed Time hipEventElapsedTime(&times[i], time_event[0], time_event[1]); } // Copy labels back to host hipMemcpy(h_labels, d_labels, X*Y*Z*sizeof(unsigned int), hipMemcpyDeviceToHost); // Check for any errors checkCUDAErrors(); // Print Number of Components printf("Number of Components (%s): %u\n", argv[f], count_components(h_labels, X*Y*Z)); // Measure the time the algorithm took print_mean_sd(times, N, X); // Delete memory delete[] h_image; delete[] h_labels; delete[] times; hipFree(d_labels); hipFree(d_image); hipFree(d_changed); // Delete Events hipEventDestroy(time_event[0]); hipEventDestroy(time_event[1]); } // Destroy CUDA Streams 
hipStreamDestroy(stream1); hipStreamDestroy(stream2); } //------------------------------------------------------------------------------------------------------------------------ // Device Functions //------------------------------------------------------------------------------------------------------------------------ // Initialise Kernel __global__ void init_labels(unsigned int* g_labels, const unsigned char *g_image) { // Calculate index const unsigned int ix = (blockIdx.x * blockDim.x) + threadIdx.x; const unsigned int iy = (blockIdx.y * blockDim.y) + threadIdx.y; const unsigned int iz = (blockIdx.z * blockDim.z) + threadIdx.z; // Check Range if((ix < cX) && (iy < cY) && (iz < cZ)) { // Load image const unsigned char pzyx = g_image[iz*pY + iy*pX + ix]; // Neighbour Connections const bool nzm1yx = (iz > 0) ? (pzyx == g_image[(iz-1)*pY + iy *pX + ix ]) : false; const bool nzym1x = (iy > 0) ? (pzyx == g_image[ iz *pY + (iy-1)*pX + ix ]) : false; const bool nzyxm1 = (ix > 0) ? (pzyx == g_image[ iz *pY + iy *pX + ix-1]) : false; // Label unsigned int label; // Initialise Label label = (nzyxm1) ? ( iz*pY + iy*pX + ix-1) : (iz*pY + iy*pX + ix); label = (nzym1x) ? ( iz*pY + (iy-1)*pX + ix) : label; label = (nzm1yx) ? 
((iz-1)*pY + iy*pX + ix) : label; // Write to Global Memory g_labels[iz*pY + iy*pX + ix] = label; } } // Resolve Kernel __global__ void resolve_labels(unsigned int *g_labels) { // Calculate index const unsigned int id = ((blockIdx.z * blockDim.z) + threadIdx.z) * pY + ((blockIdx.y * blockDim.y) + threadIdx.y) * pX + ((blockIdx.x * blockDim.x) + threadIdx.x); // Check Thread Range if(id < cXYZ) { // Resolve Label g_labels[id] = find_root(g_labels, g_labels[id]); } } // Label Kernel __global__ void label_equivalence(unsigned int *g_labels, const unsigned char *g_image, bool *changed) { // Calculate index const unsigned int ix = (blockIdx.x * blockDim.x) + threadIdx.x; const unsigned int iy = (blockIdx.y * blockDim.y) + threadIdx.y; const unsigned int iz = (blockIdx.z * blockDim.z) + threadIdx.z; // Check Range if((ix < cX) && (iy < cY) && (iz < cZ)) { // Get image and label values const unsigned char pzyx = g_image[iz*pY + iy*pX + ix]; // Neighbouring indexes const unsigned int xm1 = ix-1; const unsigned int xp1 = ix+1; const unsigned int ym1 = iy-1; const unsigned int yp1 = iy+1; const unsigned int zm1 = iz-1; const unsigned int zp1 = iz+1; // Get neighbour labels const unsigned int lzm1yx = (iz > 0) ? g_labels[zm1*pY + iy*pX + ix] : 0; const unsigned int lzym1x = (iy > 0) ? g_labels[ iz*pY + ym1*pX + ix] : 0; const unsigned int lzyxm1 = (ix > 0) ? g_labels[ iz*pY + iy*pX + xm1] : 0; const unsigned int lzyx = g_labels[ iz*pY + iy*pX + ix]; const unsigned int lzyxp1 = (ix < cX-1) ? g_labels[ iz*pY + iy*pX + xp1] : 0; const unsigned int lzyp1x = (iy < cY-1) ? g_labels[ iz*pY + yp1*pX + ix] : 0; const unsigned int lzp1yx = (iz < cZ-1) ? g_labels[zp1*pY + iy*pX + ix] : 0; const bool nzm1yx = (iz > 0) ? (pzyx == g_image[zm1*pY + iy*pX + ix]) : false; const bool nzym1x = (iy > 0) ? (pzyx == g_image[ iz*pY + ym1*pX + ix]) : false; const bool nzyxm1 = (ix > 0) ? (pzyx == g_image[ iz*pY + iy*pX + xm1]) : false; const bool nzyxp1 = (ix < cX-1) ? 
(pzyx == g_image[ iz*pY + iy*pX + xp1]) : false; const bool nzyp1x = (iy < cY-1) ? (pzyx == g_image[ iz*pY + yp1*pX + ix]) : false; const bool nzp1yx = (iz < cZ-1) ? (pzyx == g_image[zp1*pY + iy*pX + ix]) : false; // Lowest label unsigned int label = lzyx; // Find lowest neighbouring label label = ((nzm1yx) && (lzm1yx < label)) ? lzm1yx : label; label = ((nzym1x) && (lzym1x < label)) ? lzym1x : label; label = ((nzyxm1) && (lzyxm1 < label)) ? lzyxm1 : label; label = ((nzyxp1) && (lzyxp1 < label)) ? lzyxp1 : label; label = ((nzyp1x) && (lzyp1x < label)) ? lzyp1x : label; label = ((nzp1yx) && (lzp1yx < label)) ? lzp1yx : label; // If labels are different, resolve them if(label < lzyx) { // Update label // Nonatomic write may overwrite another label but on average seems to give faster results g_labels[lzyx] = label; // Record the change changed[0] = true; } } }
b88828aae5d9b4fbacb52d9369eaaa9b1857a5dd.cu
// MIT License // Copyright (c) 2019 - Daniel Peter Playne // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// Headers #include <stdio.h> #include <sys/time.h> // Project Headers #include "utils.h" #include "cuda_utils.cuh" #include "reduction.cuh" // Device Functions __global__ void init_labels (unsigned int* g_labels, const unsigned char *g_image); __global__ void resolve_labels (unsigned int* g_labels); __global__ void label_equivalence(unsigned int *g_labels, const unsigned char *g_image, bool *changed); // Image Size (Device Constant) __constant__ unsigned int cX, cY, cZ, cXYZ; __constant__ unsigned int pX, pY; // Main Method int main(int argc,char **argv) { // Check Arguments if(argc < 3) { printf("Usage: ./label_equivalence_direct_3D_clamp <gpu> <file0> <file1> ...\n"); exit(1); } // Initialise device cudaSetDevice(atoi(argv[1])); // CUDA Streams cudaStream_t stream1, stream2; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); // For each input for(int f = 2; f < argc; f++) { // Read Data from file unsigned int X, Y, Z; unsigned char *h_image = readPG3D(argv[f], X, Y, Z); // Calculate Data Mean unsigned char image_mean = mean(h_image, X*Y*Z); // Convert Data to Binary threshold(h_image, image_mean, X*Y*Z); // Number of Voxels unsigned int XYZ = X*Y*Z; // Calculate Pitch unsigned int PX = X; unsigned int PY = X*Y; // Set block size dim3 block(32, 4, 4); dim3 grid(ceil(X/(float)block.x), ceil(Y/(float)block.y), ceil(Z/(float)block.z)); // Allocate host memory unsigned int *h_labels = new unsigned int[X*Y*Z]; unsigned int *d_labels; unsigned char *d_image; bool *d_changed; // Allocate device memory cudaMalloc((void**) &d_labels, X*Y*Z*sizeof(unsigned int)); cudaMalloc((void**) &d_image, X*Y*Z*sizeof(unsigned char)); cudaMalloc((void**) &d_changed, sizeof(bool)); // Copy host to device memory cudaMemcpyToSymbol(cX, &X, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(cY, &Y, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(cZ, &Z, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(cXYZ, &XYZ, 
sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(pX, &PX, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(pY, &PY, sizeof(unsigned int), 0, cudaMemcpyHostToDevice); cudaMemcpy(d_image, h_image, X*Y*Z*sizeof(unsigned char), cudaMemcpyHostToDevice); // Timing const int N = 100; float *times = new float[N]; cudaEvent_t time_event[2]; cudaEventCreate(&time_event[0]); cudaEventCreate(&time_event[1]); // Run N times for(int i = 0; i < N; i++) { // Record start time cudaEventRecord(time_event[0]); // Initialise labels init_labels <<< grid, block >>>(d_labels, d_image); // Resolve the labels resolve_labels <<< grid, block >>>(d_labels); cudaDeviceSynchronize(); // Changed Flag bool changed = true; // While labels have changed while(changed) { // Copy changed to device cudaMemset(d_changed, 0, 1); // Label image label_equivalence <<< grid, block, 0, stream1 >>>(d_labels, d_image, d_changed); // Copy changed back cudaMemcpyAsync(&changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost, stream1); // Resolve the labels resolve_labels <<< grid, block, 0, stream2 >>>(d_labels); cudaDeviceSynchronize(); } // Record end event cudaEventRecord(time_event[1]); cudaDeviceSynchronize(); // Calculate Elapsed Time cudaEventElapsedTime(&times[i], time_event[0], time_event[1]); } // Copy labels back to host cudaMemcpy(h_labels, d_labels, X*Y*Z*sizeof(unsigned int), cudaMemcpyDeviceToHost); // Check for any errors checkCUDAErrors(); // Print Number of Components printf("Number of Components (%s): %u\n", argv[f], count_components(h_labels, X*Y*Z)); // Measure the time the algorithm took print_mean_sd(times, N, X); // Delete memory delete[] h_image; delete[] h_labels; delete[] times; cudaFree(d_labels); cudaFree(d_image); cudaFree(d_changed); // Delete Events cudaEventDestroy(time_event[0]); cudaEventDestroy(time_event[1]); } // Destroy CUDA Streams cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); } 
//------------------------------------------------------------------------------------------------------------------------ // Device Functions //------------------------------------------------------------------------------------------------------------------------ // Initialise Kernel __global__ void init_labels(unsigned int* g_labels, const unsigned char *g_image) { // Calculate index const unsigned int ix = (blockIdx.x * blockDim.x) + threadIdx.x; const unsigned int iy = (blockIdx.y * blockDim.y) + threadIdx.y; const unsigned int iz = (blockIdx.z * blockDim.z) + threadIdx.z; // Check Range if((ix < cX) && (iy < cY) && (iz < cZ)) { // Load image const unsigned char pzyx = g_image[iz*pY + iy*pX + ix]; // Neighbour Connections const bool nzm1yx = (iz > 0) ? (pzyx == g_image[(iz-1)*pY + iy *pX + ix ]) : false; const bool nzym1x = (iy > 0) ? (pzyx == g_image[ iz *pY + (iy-1)*pX + ix ]) : false; const bool nzyxm1 = (ix > 0) ? (pzyx == g_image[ iz *pY + iy *pX + ix-1]) : false; // Label unsigned int label; // Initialise Label label = (nzyxm1) ? ( iz*pY + iy*pX + ix-1) : (iz*pY + iy*pX + ix); label = (nzym1x) ? ( iz*pY + (iy-1)*pX + ix) : label; label = (nzm1yx) ? 
((iz-1)*pY + iy*pX + ix) : label; // Write to Global Memory g_labels[iz*pY + iy*pX + ix] = label; } } // Resolve Kernel __global__ void resolve_labels(unsigned int *g_labels) { // Calculate index const unsigned int id = ((blockIdx.z * blockDim.z) + threadIdx.z) * pY + ((blockIdx.y * blockDim.y) + threadIdx.y) * pX + ((blockIdx.x * blockDim.x) + threadIdx.x); // Check Thread Range if(id < cXYZ) { // Resolve Label g_labels[id] = find_root(g_labels, g_labels[id]); } } // Label Kernel __global__ void label_equivalence(unsigned int *g_labels, const unsigned char *g_image, bool *changed) { // Calculate index const unsigned int ix = (blockIdx.x * blockDim.x) + threadIdx.x; const unsigned int iy = (blockIdx.y * blockDim.y) + threadIdx.y; const unsigned int iz = (blockIdx.z * blockDim.z) + threadIdx.z; // Check Range if((ix < cX) && (iy < cY) && (iz < cZ)) { // Get image and label values const unsigned char pzyx = g_image[iz*pY + iy*pX + ix]; // Neighbouring indexes const unsigned int xm1 = ix-1; const unsigned int xp1 = ix+1; const unsigned int ym1 = iy-1; const unsigned int yp1 = iy+1; const unsigned int zm1 = iz-1; const unsigned int zp1 = iz+1; // Get neighbour labels const unsigned int lzm1yx = (iz > 0) ? g_labels[zm1*pY + iy*pX + ix] : 0; const unsigned int lzym1x = (iy > 0) ? g_labels[ iz*pY + ym1*pX + ix] : 0; const unsigned int lzyxm1 = (ix > 0) ? g_labels[ iz*pY + iy*pX + xm1] : 0; const unsigned int lzyx = g_labels[ iz*pY + iy*pX + ix]; const unsigned int lzyxp1 = (ix < cX-1) ? g_labels[ iz*pY + iy*pX + xp1] : 0; const unsigned int lzyp1x = (iy < cY-1) ? g_labels[ iz*pY + yp1*pX + ix] : 0; const unsigned int lzp1yx = (iz < cZ-1) ? g_labels[zp1*pY + iy*pX + ix] : 0; const bool nzm1yx = (iz > 0) ? (pzyx == g_image[zm1*pY + iy*pX + ix]) : false; const bool nzym1x = (iy > 0) ? (pzyx == g_image[ iz*pY + ym1*pX + ix]) : false; const bool nzyxm1 = (ix > 0) ? (pzyx == g_image[ iz*pY + iy*pX + xm1]) : false; const bool nzyxp1 = (ix < cX-1) ? 
(pzyx == g_image[ iz*pY + iy*pX + xp1]) : false; const bool nzyp1x = (iy < cY-1) ? (pzyx == g_image[ iz*pY + yp1*pX + ix]) : false; const bool nzp1yx = (iz < cZ-1) ? (pzyx == g_image[zp1*pY + iy*pX + ix]) : false; // Lowest label unsigned int label = lzyx; // Find lowest neighbouring label label = ((nzm1yx) && (lzm1yx < label)) ? lzm1yx : label; label = ((nzym1x) && (lzym1x < label)) ? lzym1x : label; label = ((nzyxm1) && (lzyxm1 < label)) ? lzyxm1 : label; label = ((nzyxp1) && (lzyxp1 < label)) ? lzyxp1 : label; label = ((nzyp1x) && (lzyp1x < label)) ? lzyp1x : label; label = ((nzp1yx) && (lzp1yx < label)) ? lzp1yx : label; // If labels are different, resolve them if(label < lzyx) { // Update label // Nonatomic write may overwrite another label but on average seems to give faster results g_labels[lzyx] = label; // Record the change changed[0] = true; } } }
8036d8190891015ad1807647f1337fc5e3e95505.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/book.h" int main( void ) { hipDeviceProp_t prop; int count; HANDLE_ERROR( hipGetDeviceCount( &count ) ); for (int i=0; i< count; i++) { HANDLE_ERROR( hipGetDeviceProperties( &prop, i ) ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n"); printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } }
8036d8190891015ad1807647f1337fc5e3e95505.cu
#include "../common/book.h" int main( void ) { cudaDeviceProp prop; int count; HANDLE_ERROR( cudaGetDeviceCount( &count ) ); for (int i=0; i< count; i++) { HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n"); printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } }
9ac419ff328531ea8fc1a8cdad1de7d5223c70a9.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <helper_cuda.h> #include <helper_timer.h> #include "imgutils.h" #define RADIUS 1 #define FILTER_SIZE ((RADIUS * 2) + 1) #define BLOCK_SIZE 16 #define ITERATIONS 128 #define PRINT 1 #define RANDOM 1 // Constant memory for filter // Since constant memory is read only and has its own cache, this improves the // speed of accessing the filter __constant__ float c_filter[FILTER_SIZE*FILTER_SIZE]; __global__ void kernel(float* d_in, int height, int width, float* d_out) { // Get global position in grid unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // actual location within image data // since image data is interleaved RGB values, offset like you would a 2D // image, multiply that by the number of channels (3) and add the z value // representing whether the pixel is R, G, or B unsigned int loc = (y * width) + x; // Shared memory block is big enough for the full block plus one radius of // pixels on all sides __shared__ float sh_data[BLOCK_SIZE + 2*RADIUS][BLOCK_SIZE + 2*RADIUS]; // Copy to shared memory // It would be trivial for each thread to bring its corresponding pixel into // shared memory but the pixels in the radius around each block must also be // copied to shared memory. // // So, the following scheme is used to bring all pixels in. // // Each pixel copies four pixels, which are the four corners one RADIUS away // For example, with RADIUS=2, O is this thread's pixel and the X's are the // pixels brought into shared memory // 0 1 2 3 4 // //0 X X //1 //2 O //3 //4 X X // // Note that RADIUS must be <= BLOCK_SIZE for the scheme to work. // If RADIUS > BLOCK_SIZE, each thread must pull in more than 4 pixels, // which requires a more expensive copying scheme. 
int x_tmp = x - RADIUS; int y_tmp = y - RADIUS; sh_data[threadIdx.x][threadIdx.y] = (x_tmp < 0 || y_tmp < 0) ? 0 : d_in[loc - RADIUS - width*RADIUS]; x_tmp = x + RADIUS; sh_data[threadIdx.x + 2*RADIUS][threadIdx.y] = (x_tmp >= width || y_tmp < 0) ? 0 : d_in[loc + RADIUS - width*RADIUS]; x_tmp = x - RADIUS; y_tmp = y + RADIUS; sh_data[threadIdx.x][threadIdx.y + 2*RADIUS] = (x_tmp < 0 || y_tmp >= height) ? 0 : d_in[loc - RADIUS + width*RADIUS]; x_tmp = x + RADIUS; sh_data[threadIdx.x + 2*RADIUS][threadIdx.y + 2*RADIUS] = (x_tmp >= width || y_tmp >= height) ? 0 : d_in[loc + RADIUS + width*RADIUS]; __syncthreads(); // sum of all element-wise multiplications float sum = 0; // only perform convolution on pixels within radius // Global memory use and O(N^2) loop in kernel kill performance if (x >= RADIUS && y >= RADIUS && x < (width - RADIUS) && y < (height - RADIUS)) { #pragma unroll for (int i = -RADIUS; i <= RADIUS; ++i) { #pragma unroll for (int j = -RADIUS; j <= RADIUS; ++j) { // filter location based just on x and y int filt_x = i + RADIUS; int filt_y = j + RADIUS; int filter_loc = filt_y * FILTER_SIZE + filt_x; // add element-wise product to accumulator sum += sh_data[threadIdx.x+RADIUS+i][threadIdx.y+RADIUS+j] * c_filter[filter_loc]; } } // add pixel value to output d_out[loc] = sum; } } int main(int argc, char** argv) { if (argc < 2) { printf("Usage: ./naive_conv <image>\n"); return 0; } // read in image cv::Mat h_in = read_image_bw(argv[1]); int height = h_in.rows; int width = h_in.cols; // Declare image and filter variables for host and device float *h_filter, *h_out, *d_in, *d_out; // size to allocate for image and filter variables unsigned int img_size = width * height * sizeof(float); unsigned int full_filter_size = FILTER_SIZE * FILTER_SIZE * sizeof(float); #if PRINT printf("img_size=%u, full_filter_size=%u\n", img_size, full_filter_size); #endif // Allocate host data h_filter = (float*)malloc(full_filter_size); h_out = (float*)malloc(img_size); // copy 
filter template to actual filter (maybe redundant) #if RANDOM srand(200); #else // Initialize filter template // clang-format off const float filt_template[FILTER_SIZE][FILTER_SIZE] = { {1, 1, 1}, {1, -8, 1}, {1, 1, 1} }; // clang-format on #endif for (int row = 0; row < FILTER_SIZE; ++row) { for (int col = 0; col < FILTER_SIZE; ++col) { int idx = row * FILTER_SIZE + col; #if RANDOM h_filter[idx] = (float)(rand() % 16); #else h_filter[idx] = filt_template[row][col]; #endif } } // Allocate device data checkCudaErrors(hipMalloc((void**)&d_in, img_size)); checkCudaErrors(hipMalloc((void**)&d_out, img_size)); // Copy host memory to device checkCudaErrors(hipMemcpy(d_in, h_in.data, img_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(c_filter, h_filter, full_filter_size)); // Let grid size be based on block size // Have just enough blocks to cover whole image // The -1 is to cover the case where image dimensions are multiples of // BLOCKS_SIZE int gridXSize = 1 + ((width - 1) / BLOCK_SIZE); int gridYSize = 1 + ((height - 1) / BLOCK_SIZE); #if PRINT printf("gridXSize=%d, gridYSize=%d, BLOCK_SIZE=%d\n", gridXSize, gridYSize, BLOCK_SIZE); #endif dim3 h_gridDim(gridXSize, gridYSize); dim3 h_blockDim(BLOCK_SIZE, BLOCK_SIZE); // Run on GPU 0 hipSetDevice(0); // Timing stuff StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); // Kernel call // i=-1 is the warm up iteration for (int i = -1; i < ITERATIONS; ++i) { if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } hipLaunchKernelGGL(( kernel), dim3(h_gridDim), dim3(h_blockDim), 0, 0, d_in, height, width, d_out); } // Get time checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&hTimer); double time = sdkGetTimerValue(&hTimer) / (double)ITERATIONS; printf("Kernel time = %.5f ms\n", time); int nBlocks = gridXSize*gridYSize; int nThreads = nBlocks*BLOCK_SIZE*BLOCK_SIZE; printf("#Blocks=%d, #Threads=%d, Time/Thread=%f\n", nBlocks, nThreads, 
time*1000000.0/(double)nThreads); // Copy result back to host checkCudaErrors(hipMemcpy(h_out, d_out, img_size, hipMemcpyDeviceToHost)); // write image to file save_image_bw("output.png", h_out, height, width); // Free device data hipFree(d_in); hipFree(d_out); // Free host data free(h_filter); free(h_out); }
9ac419ff328531ea8fc1a8cdad1de7d5223c70a9.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <helper_cuda.h> #include <helper_timer.h> #include "imgutils.h" #define RADIUS 1 #define FILTER_SIZE ((RADIUS * 2) + 1) #define BLOCK_SIZE 16 #define ITERATIONS 128 #define PRINT 1 #define RANDOM 1 // Constant memory for filter // Since constant memory is read only and has its own cache, this improves the // speed of accessing the filter __constant__ float c_filter[FILTER_SIZE*FILTER_SIZE]; __global__ void kernel(float* d_in, int height, int width, float* d_out) { // Get global position in grid unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // actual location within image data // since image data is interleaved RGB values, offset like you would a 2D // image, multiply that by the number of channels (3) and add the z value // representing whether the pixel is R, G, or B unsigned int loc = (y * width) + x; // Shared memory block is big enough for the full block plus one radius of // pixels on all sides __shared__ float sh_data[BLOCK_SIZE + 2*RADIUS][BLOCK_SIZE + 2*RADIUS]; // Copy to shared memory // It would be trivial for each thread to bring its corresponding pixel into // shared memory but the pixels in the radius around each block must also be // copied to shared memory. // // So, the following scheme is used to bring all pixels in. // // Each pixel copies four pixels, which are the four corners one RADIUS away // For example, with RADIUS=2, O is this thread's pixel and the X's are the // pixels brought into shared memory // 0 1 2 3 4 // //0 X X //1 //2 O //3 //4 X X // // Note that RADIUS must be <= BLOCK_SIZE for the scheme to work. // If RADIUS > BLOCK_SIZE, each thread must pull in more than 4 pixels, // which requires a more expensive copying scheme. int x_tmp = x - RADIUS; int y_tmp = y - RADIUS; sh_data[threadIdx.x][threadIdx.y] = (x_tmp < 0 || y_tmp < 0) ? 
0 : d_in[loc - RADIUS - width*RADIUS]; x_tmp = x + RADIUS; sh_data[threadIdx.x + 2*RADIUS][threadIdx.y] = (x_tmp >= width || y_tmp < 0) ? 0 : d_in[loc + RADIUS - width*RADIUS]; x_tmp = x - RADIUS; y_tmp = y + RADIUS; sh_data[threadIdx.x][threadIdx.y + 2*RADIUS] = (x_tmp < 0 || y_tmp >= height) ? 0 : d_in[loc - RADIUS + width*RADIUS]; x_tmp = x + RADIUS; sh_data[threadIdx.x + 2*RADIUS][threadIdx.y + 2*RADIUS] = (x_tmp >= width || y_tmp >= height) ? 0 : d_in[loc + RADIUS + width*RADIUS]; __syncthreads(); // sum of all element-wise multiplications float sum = 0; // only perform convolution on pixels within radius // Global memory use and O(N^2) loop in kernel kill performance if (x >= RADIUS && y >= RADIUS && x < (width - RADIUS) && y < (height - RADIUS)) { #pragma unroll for (int i = -RADIUS; i <= RADIUS; ++i) { #pragma unroll for (int j = -RADIUS; j <= RADIUS; ++j) { // filter location based just on x and y int filt_x = i + RADIUS; int filt_y = j + RADIUS; int filter_loc = filt_y * FILTER_SIZE + filt_x; // add element-wise product to accumulator sum += sh_data[threadIdx.x+RADIUS+i][threadIdx.y+RADIUS+j] * c_filter[filter_loc]; } } // add pixel value to output d_out[loc] = sum; } } int main(int argc, char** argv) { if (argc < 2) { printf("Usage: ./naive_conv <image>\n"); return 0; } // read in image cv::Mat h_in = read_image_bw(argv[1]); int height = h_in.rows; int width = h_in.cols; // Declare image and filter variables for host and device float *h_filter, *h_out, *d_in, *d_out; // size to allocate for image and filter variables unsigned int img_size = width * height * sizeof(float); unsigned int full_filter_size = FILTER_SIZE * FILTER_SIZE * sizeof(float); #if PRINT printf("img_size=%u, full_filter_size=%u\n", img_size, full_filter_size); #endif // Allocate host data h_filter = (float*)malloc(full_filter_size); h_out = (float*)malloc(img_size); // copy filter template to actual filter (maybe redundant) #if RANDOM srand(200); #else // Initialize filter template // 
clang-format off const float filt_template[FILTER_SIZE][FILTER_SIZE] = { {1, 1, 1}, {1, -8, 1}, {1, 1, 1} }; // clang-format on #endif for (int row = 0; row < FILTER_SIZE; ++row) { for (int col = 0; col < FILTER_SIZE; ++col) { int idx = row * FILTER_SIZE + col; #if RANDOM h_filter[idx] = (float)(rand() % 16); #else h_filter[idx] = filt_template[row][col]; #endif } } // Allocate device data checkCudaErrors(cudaMalloc((void**)&d_in, img_size)); checkCudaErrors(cudaMalloc((void**)&d_out, img_size)); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_in, h_in.data, img_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(c_filter, h_filter, full_filter_size)); // Let grid size be based on block size // Have just enough blocks to cover whole image // The -1 is to cover the case where image dimensions are multiples of // BLOCKS_SIZE int gridXSize = 1 + ((width - 1) / BLOCK_SIZE); int gridYSize = 1 + ((height - 1) / BLOCK_SIZE); #if PRINT printf("gridXSize=%d, gridYSize=%d, BLOCK_SIZE=%d\n", gridXSize, gridYSize, BLOCK_SIZE); #endif dim3 h_gridDim(gridXSize, gridYSize); dim3 h_blockDim(BLOCK_SIZE, BLOCK_SIZE); // Run on GPU 0 cudaSetDevice(0); // Timing stuff StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); // Kernel call // i=-1 is the warm up iteration for (int i = -1; i < ITERATIONS; ++i) { if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); } kernel<<<h_gridDim, h_blockDim>>>(d_in, height, width, d_out); } // Get time checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&hTimer); double time = sdkGetTimerValue(&hTimer) / (double)ITERATIONS; printf("Kernel time = %.5f ms\n", time); int nBlocks = gridXSize*gridYSize; int nThreads = nBlocks*BLOCK_SIZE*BLOCK_SIZE; printf("#Blocks=%d, #Threads=%d, Time/Thread=%f\n", nBlocks, nThreads, time*1000000.0/(double)nThreads); // Copy result back to host checkCudaErrors(cudaMemcpy(h_out, d_out, img_size, cudaMemcpyDeviceToHost)); // write image 
to file save_image_bw("output.png", h_out, height, width); // Free device data cudaFree(d_in); cudaFree(d_out); // Free host data free(h_filter); free(h_out); }
2a0d3191fab0cae456a0bb5608fcdffc5c7e9d06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * cuda_adaptive_seeding.cu - routines for adaptive seeding on GPUs. */ #include "accelerad_copyright.h" #include <stdio.h> #include <stdlib.h> #define __STDC_FORMAT_MACROS #include <inttypes.h> #include "kmeans.h" //#define PRINT_CUDA #define MULTI_BLOCK #define VALID_HORIZONTAL 0x01 /* Horizontal neighbor quad tree node is valid. */ #define VALID_VERTICAL 0x10 /* Vertical neighbor quad tree node is valid. */ #ifdef CAP_REGISTERS_PER_THREAD #include "accelerad.h" /* This is the maximum number of registers used by any cuda kernel in this in this file, found by using the flag "-Xptxas -v" to compile in nvcc. This should be updated when changes are made to the kernels. */ #ifdef RTX #define REGISTERS_PER_THREAD 36 /* Registers per thread under CUDA 10.0 */ #else #define REGISTERS_PER_THREAD 23 /* Registers per thread under CUDA 7.5 */ #endif #endif #ifdef __cplusplus extern "C" { #endif // Ambient sample distribution based on Wang et al. 
(2009) "An efficient GPU-based approach for interactive global illumination" __device__ inline static PointDirection average_point_direction(const PointDirection& a, const PointDirection& b, const PointDirection& c, const PointDirection& d) { PointDirection average; average.pos = (a.pos + b.pos + c.pos + d.pos) / 4.0f; //average.dir = optix::normalize(a.dir + b.dir + c.dir + d.dir); average.dir = a.dir + b.dir + c.dir + d.dir; const float length = optix::length(average.dir); if ( length > 0.0f ) average.dir /= length; return average; } __device__ inline static float geometric_error(const PointDirection& a, const PointDirection& b, const float alpha) { return alpha * optix::length(a.pos - b.pos) + sqrtf(2.0f * fmaxf(1.0f - optix::dot(a.dir, b.dir), 0.0f)); } __device__ inline static unsigned int valid_hit(const PointDirection& hit) { return optix::dot(hit.dir, hit.dir) > 0.0f && optix::dot(hit.pos, hit.pos) >= 0.0f; } static int CCALL isPowerOfTwo(unsigned int x) { return ((x != 0) && !(x & (x - 1))); } static unsigned int CCALL calc_block_dim(const unsigned int maxThreadsPerBlock, const unsigned int levels) { unsigned int blockDim = 1u; unsigned int size = maxThreadsPerBlock << 1; while ( size >>= 2 ) blockDim <<= 1; if ( blockDim > (1u << levels) ) blockDim = 1u << levels; return blockDim; } #ifndef MULTI_BLOCK __device__ inline static void reduce(float *error, const int level, const int idX, const int idY, const int width) { int tid = idX + idY * width; unsigned int stride = 1u; float err = error[tid]; for (int i = 0; i < level; i++) { unsigned int stride2 = stride << 1; if (!(idX % stride2) && !(idY % stride2)) { err += error[tid + stride]; err += error[tid + stride * width]; err += error[tid + stride * (width + 1)]; error[tid] = err; } stride = stride2; __syncthreads(); } } // Ambient sample distribution __global__ static void geometric_variation(PointDirection *deviceHits, int *seed, const unsigned int width, const unsigned int height, const unsigned int 
levels, const float alpha) { extern __shared__ PointDirection blockSharedMemory[]; unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int sid = blockDim.x * threadIdx.y + threadIdx.x; float *err = (float*)malloc(levels * sizeof(float)); unsigned int stride = 1u; PointDirection hit; unsigned int valid = idX < width && idY < height; if (valid) { hit = deviceHits[tid]; valid = valid_hit(hit); } if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 0.0f; PointDirection accum = hit; blockSharedMemory[sid] = hit; #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g, %g,%g,%g, valid=%i\n", width, height, accum.pos.x, accum.pos.y, accum.pos.z, accum.dir.x, accum.dir.y, accum.dir.z, valid); #endif __syncthreads(); /* Calculate geometric error for each hit point to each quad-tree node. */ for (int i = 0; i < levels; i++) { #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits stride=%i, accum=%g,%g,%g\n", stride, accum.pos.x, accum.pos.y, accum.pos.z); #endif unsigned int stride2 = stride << 1; if (!(idX % stride2) && !(idY % stride2)) { accum = average_point_direction( accum, blockSharedMemory[sid + stride], blockSharedMemory[sid + stride * width], blockSharedMemory[sid + stride * (width + 1)] ); blockSharedMemory[sid] = accum; } __syncthreads(); err[i] = valid ? geometric_error(hit, blockSharedMemory[sid - idX % stride2 - (idY % stride2) * blockDim.x], alpha) : 0.0f; stride = stride2; } __syncthreads(); float *error = (float *)blockSharedMemory; for (int i = levels; i--; ) { unsigned int stride2 = stride >> 1; /* Calculate geometric error average at each quad-tree node. */ error[tid] = err[i]; __syncthreads(); if (i) reduce(error, i, idX, idY, width); // sum errors at this quad tree node /* Divide the pool proportinally to error at each quad-tree node. 
*/ if (idX < width && idY < height && !(idX % stride) && !(idY % stride)) { valid = 0u; if (idX + stride2 < width) valid |= VALID_HORIZONTAL; if (idY + stride2 < height) valid |= VALID_VERTICAL; float err[4]; err[0] = error[tid]; err[1] = (valid2 & VALID_HORIZONTAL) ? error[tid + stride2] : 0.0f; err[2] = (valid2 & VALID_VERTICAL) ? error[tid + stride2 * width] : 0.0f; err[3] = (valid2 & (VALID_HORIZONTAL | VALID_VERTICAL)) == (VALID_HORIZONTAL | VALID_VERTICAL) ? error[tid + stride2 * (width + 1)] : 0.0f; float errSum = err[0] + err[1] + err[2] + err[3]; int seedSum = seed[tid]; float scoreSum = errSum > 0.0f ? seedSum / errSum : 0.0f; int s[4]; s[0] = scoreSum * err[0]; s[1] = scoreSum * err[1]; s[2] = scoreSum * err[2]; s[3] = scoreSum * err[3]; int diff = seedSum - s[0] - s[1] - s[2] - s[3]; #ifdef PRINT_CUDA if (!tid) printf("calc_score stride=%i, i=%i, errSum=%g, seedSum=%i, scoreSum=%g, diff=%i\n", stride, i, errSum, seedSum, scoreSum, diff); #endif if (diff && errSum > 0.0f) { float max[3] = { 0.0f, 0.0f, 0.0f }; // Will store up to 3 maximum values in err[] int maxi[3] = { -1, -1, -1 }; // Will store the indices of up to 3 maximum values in err[] for (int j = 0; j < 4; j++) { // Find 3 largest values if (err[j] > max[0]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = max[0]; maxi[1] = maxi[0]; max[0] = err[j]; maxi[0] = j; } else if (err[j] > max[1]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = err[j]; maxi[1] = j; } else if (err[j] > max[2]) { max[2] = err[j]; maxi[2] = j; } } if (diff > 2 && max[2] > 0.0f) { s[maxi[2]] += 1; diff -= 1; } if (diff > 1 && max[1] > 0.0f) { s[maxi[1]] += 1; diff -= 1; } if (diff && max[0] > 0.0f) { s[maxi[0]] += diff; } } seed[tid] = s[0]; if (valid & VALID_HORIZONTAL) seed[tid + stride2] = s[1]; if (valid & VALID_VERTICAL) { seed[tid + stride2 * width] = s[2]; if (valid & VALID_HORIZONTAL) seed[tid + stride2 * (width + 1)] = s[3]; } } __syncthreads(); stride = stride2; } free(err); } #else /* MULTI_BLOCK */ __global__ 
static void mip_map_hits(PointDirection *deviceHits, PointDirection *deviceMipMap, const unsigned int width, const unsigned int height) { extern __shared__ PointDirection blockSharedMemory[]; unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int sid = blockDim.x * threadIdx.y + threadIdx.x; unsigned int offset = 0u; unsigned int stride = 1u; unsigned int levelWidth = width; unsigned int levelHeight = height; PointDirection hit; unsigned int valid = idX < width && idY < height; if (valid) { hit = deviceHits[tid]; valid = valid_hit(hit); } if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 0.0f; PointDirection accum = hit; blockSharedMemory[sid] = hit; #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g, %g,%g,%g, valid=%i\n", width, height, accum.pos.x, accum.pos.y, accum.pos.z, accum.dir.x, accum.dir.y, accum.dir.z, valid); #endif __syncthreads(); /* Calculate geometric error for each hit point to each quad-tree node. 
*/ while (stride < blockDim.x) { #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits stride=%i, offset=%i, accum=%g,%g,%g\n", stride, offset, accum.pos.x, accum.pos.y, accum.pos.z); #endif unsigned int stride2 = stride << 1; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; if (!(idX % stride2) && !(idY % stride2)) { accum = average_point_direction( accum, blockSharedMemory[sid + stride], blockSharedMemory[sid + stride * blockDim.x], blockSharedMemory[sid + stride * (blockDim.x + 1)] ); blockSharedMemory[sid] = accum; deviceMipMap[offset + (idX + idY * levelWidth) / stride2] = accum; } #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g\n", levelWidth, levelHeight, accum.pos.x, accum.pos.y, accum.pos.z); #endif __syncthreads(); stride = stride2; offset += levelWidth * levelHeight; } } __global__ static void calc_error(PointDirection *deviceHits, PointDirection *deviceMipMap, float *error, const unsigned int width, const unsigned int height, const unsigned int levels, float alpha) { unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int stride = 1u; unsigned int levelWidth = width; unsigned int levelHeight = height; if (idX < width && idY < height) { PointDirection hit = deviceHits[tid]; unsigned int valid = valid_hit(hit); if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 0.0f; PointDirection *mipMapLevel = deviceMipMap; /* Calculate geometric error for each hit point to each quad-tree node. */ for (unsigned int i = 0u; i < levels; i++) { #ifdef PRINT_CUDA if (!tid) printf("calc_error stride=%i, i=%i, valid=%i\n", stride, i, valid); #endif stride <<= 1; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; error[tid + i * width * height] = valid ? 
geometric_error(hit, mipMapLevel[idX / stride + idY / stride * levelWidth], alpha) : 0.0f; mipMapLevel += levelWidth * levelHeight; } } } __global__ static void reduce_error(float *error, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int scale) { unsigned int idX = scale * (blockDim.x * blockIdx.x + threadIdx.x); unsigned int idY = scale * (blockDim.y * blockIdx.y + threadIdx.y); unsigned int tid = idX + idY * width; unsigned int valid = idX < width && idY < height; for (unsigned int j = 1u; j < levels; j++) { tid += width * height; float err = valid ? error[tid] : 0.0f; unsigned int stride = scale; while (stride < (scale << j) && stride < blockDim.x * scale) { #ifdef PRINT_CUDA if (!(tid % (width * height))) printf("reduce_error stride=%i, j=%i, scale=%i, err=%g\n", stride, j, scale, err); #endif unsigned int stride2 = stride << 1; if (valid && !(idX % stride2) && !(idY % stride2)) { if (idX + stride < width) err += error[tid + stride]; if (idY + stride < height) { err += error[tid + stride * width]; if (idX + stride < width) err += error[tid + stride * (width + 1)]; } error[tid] = err; } stride = stride2; __syncthreads(); } } } __global__ static void calc_score(float *error, int *seed, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int scale) { unsigned int idX = scale * (blockDim.x * blockIdx.x + threadIdx.x); unsigned int idY = scale * (blockDim.y * blockIdx.y + threadIdx.y); unsigned int tid = idX + idY * width; unsigned int valid = idX < width && idY < height; unsigned int stride = scale << levels; for (int i = levels; i--; ) { unsigned int stride2 = stride >> 1; /* Divide the pool proportinally to error at each quad-tree node. 
*/ if (valid && !(idX % stride) && !(idY % stride)) { unsigned int valid2 = 0u; if (idX + stride2 < width) valid2 |= VALID_HORIZONTAL; if (idY + stride2 < height) valid2 |= VALID_VERTICAL; unsigned int lid = tid + width * height * i; float err[4]; err[0] = error[lid]; err[1] = (valid2 & VALID_HORIZONTAL) ? error[lid + stride2] : 0.0f; err[2] = (valid2 & VALID_VERTICAL) ? error[lid + stride2 * width] : 0.0f; err[3] = (valid2 & (VALID_HORIZONTAL | VALID_VERTICAL)) == (VALID_HORIZONTAL | VALID_VERTICAL) ? error[lid + stride2 * (width + 1)] : 0.0f; float errSum = err[0] + err[1] + err[2] + err[3]; int seedSum = seed[tid]; float scoreSum = errSum > 0.0f ? seedSum / errSum : 0.0f; int s[4]; s[0] = scoreSum * err[0]; s[1] = scoreSum * err[1]; s[2] = scoreSum * err[2]; s[3] = scoreSum * err[3]; int diff = seedSum - s[0] - s[1] - s[2] - s[3]; #ifdef PRINT_CUDA if (!tid) printf("calc_score stride=%i, i=%i, tid=%i, lid=%i, scale=%i, errSum=%g, seedSum=%i, scoreSum=%g, diff=%i\n", stride, i, tid, lid, scale, errSum, seedSum, scoreSum, diff); #endif if (diff && errSum > 0.0f) { float max[3] = { 0.0f, 0.0f, 0.0f }; // Will store up to 3 maximum values in err[] int maxi[3] = { -1, -1, -1 }; // Will store the indices of up to 3 maximum values in err[] for (int j = 0; j < 4; j++) { // Find 3 largest values if (err[j] > max[0]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = max[0]; maxi[1] = maxi[0]; max[0] = err[j]; maxi[0] = j; } else if (err[j] > max[1]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = err[j]; maxi[1] = j; } else if (err[j] > max[2]) { max[2] = err[j]; maxi[2] = j; } } if (diff > 2 && max[2] > 0.0f) { s[maxi[2]] += 1; diff -= 1; } if (diff > 1 && max[1] > 0.0f) { s[maxi[1]] += 1; diff -= 1; } if (diff && max[0] > 0.0f) { s[maxi[0]] += diff; } } seed[tid] = s[0]; if (valid2 & VALID_HORIZONTAL) seed[tid + stride2] = s[1]; if (valid2 & VALID_VERTICAL) { seed[tid + stride2 * width] = s[2]; if (valid2 & VALID_HORIZONTAL) seed[tid + stride2 * (width + 1)] = s[3]; } } 
__syncthreads(); stride = stride2; } } /* Calculate average of hits at each quad tree node */ static void CCALL cuda_mip_map_hits_recursive(PointDirection *deviceHits, PointDirection *deviceMipMap, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int maxThreadsPerBlock, dim3 dimGrid, dim3 dimBlock, size_t blockSharedMemorySize) { /* Calculate average of hits at each quad tree node */ hipLaunchKernelGGL(( mip_map_hits) , dim3(dimGrid), dim3(dimBlock), blockSharedMemorySize , 0, deviceHits, deviceMipMap, width, height); hipDeviceSynchronize(); checkLastCudaError(); if ( dimBlock.x < (1u << levels) ) { unsigned int complete = 1u; unsigned int offset = 0u; unsigned int levelWidth = (width - 1) / 2 + 1; unsigned int levelHeight = (height - 1) / 2 + 1; for (unsigned int i = 1u; i < dimBlock.x / 2u; i <<= 1) { complete++; offset += levelWidth * levelHeight; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; } const unsigned int blockDim = calc_block_dim(maxThreadsPerBlock, levels - complete); const unsigned int blocksX = (levelWidth - 1) / blockDim + 1; const unsigned int blocksY = (levelHeight - 1) / blockDim + 1; const dim3 dimSuperGrid(blocksX, blocksY); const dim3 dimSuperBlock(blockDim, blockDim); #ifdef PRINT_CUDA fprintf(stderr, "cuda_mip_map_hits_recursive: offset %i, width %i, height %i, levels %i\n", offset, levelWidth, levelHeight, levels - complete); #endif cuda_mip_map_hits_recursive(deviceMipMap + offset, deviceMipMap + offset + levelWidth * levelHeight, levelWidth, levelHeight, levels - complete, maxThreadsPerBlock, dimSuperGrid, dimSuperBlock, dimSuperBlock.x * dimSuperBlock.y * sizeof(PointDirection)); } } /* Calculate average geometric variation for each quad tree node */ static void CCALL cuda_score_hits_recursive(float *deviceError, int *deviceSeeds, const unsigned int width, const unsigned int height, unsigned int levels, const unsigned int scale, const unsigned int 
maxThreadsPerBlock, dim3 dimGrid, dim3 dimBlock) { /* Perform reduction on error */ hipLaunchKernelGGL(( reduce_error) , dim3(dimGrid), dim3(dimBlock), 0 , 0, deviceError, width, height, levels, scale); hipDeviceSynchronize(); checkLastCudaError(); /* Recruse if block not large enough for reduction */ if ( dimBlock.x < (1u << levels) ) { unsigned int complete = 0u; for (unsigned int i = 1u; i < dimBlock.x; i <<= 1) complete++; const unsigned int blockDim = calc_block_dim(maxThreadsPerBlock, levels - complete); const unsigned int blocksX = (dimGrid.x - 1) / blockDim + 1; const unsigned int blocksY = (dimGrid.y - 1) / blockDim + 1; const dim3 dimSuperGrid(blocksX, blocksY); const dim3 dimSuperBlock(blockDim, blockDim); cuda_score_hits_recursive(deviceError + width * height * complete, deviceSeeds, width, height, levels - complete, scale * dimBlock.x, maxThreadsPerBlock, dimSuperGrid, dimSuperBlock); levels = complete; } /* Calculate score for each leaf node based on error */ hipLaunchKernelGGL(( calc_score) , dim3(dimGrid), dim3(dimBlock), 0 , 0, deviceError, deviceSeeds, width, height, levels, scale); hipDeviceSynchronize(); checkLastCudaError(); } #endif /* MULTI_BLOCK */ /* Score the relative need for an irradiance cache entry at each hit point */ void CCALL cuda_score_hits(PointDirection *hits, int *seeds, const unsigned int width, const unsigned int height, const float weight, const unsigned int seed_count) { PointDirection *deviceHits; #ifdef MULTI_BLOCK PointDirection *deviceMipMap; float *deviceError; #endif int *deviceSeeds; /* Calculate number of levels */ unsigned int levels = 0; unsigned int size = width > height ? 
width : height; if ( !isPowerOfTwo(size) ) levels++; while ( size >>= 1 ) levels++; /* Determine block size */ hipDeviceProp_t deviceProp; int deviceNum; hipGetDevice(&deviceNum); hipGetDeviceProperties(&deviceProp, deviceNum); #ifdef CAP_REGISTERS_PER_THREAD const unsigned int registersPerBlock = deviceProp.regsPerBlock; unsigned int threadsPerBlock = deviceProp.maxThreadsPerBlock; while (registersPerBlock / threadsPerBlock < REGISTERS_PER_THREAD) threadsPerBlock >>= 1; #else const unsigned int threadsPerBlock = deviceProp.maxThreadsPerBlock; #endif /* To support reduction, blockDim *must* be a power of two. */ const unsigned int blockDim = calc_block_dim(threadsPerBlock, levels); const unsigned int blocksX = (width - 1) / blockDim + 1; const unsigned int blocksY = (height - 1) / blockDim + 1; const size_t blockSharedMemorySize = blockDim * blockDim * sizeof(PointDirection); #ifndef MULTI_BLOCK if (blocksX != 1u || blocksY != 1u) err("Your CUDA hardware has insufficient block size %u threads (%u x %u blocks needed). 
Recompile with MULTI_BLOCK flag.", deviceProp.maxThreadsPerBlock, blocksX, blocksY); #endif if (blockSharedMemorySize > deviceProp.sharedMemPerBlock) err("Your CUDA hardware has insufficient block shared memory %" PRIu64 " (%" PRIu64 " needed).", deviceProp.sharedMemPerBlock, blockSharedMemorySize); const dim3 dimGrid(blocksX, blocksY); const dim3 dimBlock(blockDim, blockDim); #ifdef PRINT_CUDA fprintf(stderr, "Adaptive sampling: Block %i x %i, Grid %i x %i, Shared %i, Levels %i, Weight %g\n", blockDim, blockDim, blocksX, blocksY, blockSharedMemorySize, levels, weight); #endif /* Allocate memory and copy hits to the GPU */ size = width * height; checkCuda(hipMalloc(&deviceHits, size * sizeof(PointDirection))); checkCuda(hipMemcpy(deviceHits, hits, size * sizeof(PointDirection), hipMemcpyHostToDevice)); #ifdef MULTI_BLOCK /* Allocate memory on the GPU */ unsigned int mipMapSize = 0u; unsigned int levelWidth = width; unsigned int levelHeight = height; while (levelWidth > 1u || levelHeight > 1u) { levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; mipMapSize += levelWidth * levelHeight; } checkCuda(hipMalloc(&deviceMipMap, mipMapSize * sizeof(PointDirection))); // Storage requirement for mip map is 1/3 or original data checkCuda(hipMalloc(&deviceError, size * levels * sizeof(float))); /* Calculate average of hits at each quad tree node */ cuda_mip_map_hits_recursive(deviceHits, deviceMipMap, width, height, levels, threadsPerBlock, dimGrid, dimBlock, blockSharedMemorySize); /* Calculate geometric variation at each quad tree node */ hipLaunchKernelGGL(( calc_error) , dim3(dimGrid), dim3(dimBlock), 0 , 0, deviceHits, deviceMipMap, deviceError, width, height, levels, weight); hipDeviceSynchronize(); checkLastCudaError(); /* Free memory on the GPU */ checkCuda(hipFree(deviceHits)); checkCuda(hipFree(deviceMipMap)); #endif /* MULTI_BLOCK */ /* Allocate memory and copy first seed to the GPU */ seeds[0] = seed_count; #ifdef PRINT_CUDA 
fprintf(stderr, "Target total score: %i\n", seed_count); #endif checkCuda(hipMalloc(&deviceSeeds, size * sizeof(int))); checkCuda(hipMemcpy(deviceSeeds, seeds, sizeof(int), hipMemcpyHostToDevice)); // transfer only first entry #ifdef MULTI_BLOCK /* Calculate average geometric variation for each quad tree node */ cuda_score_hits_recursive(deviceError, deviceSeeds, width, height, levels, 1u, threadsPerBlock, dimGrid, dimBlock); /* Free memory on the GPU */ checkCuda(hipFree(deviceError)); #else /* MULTI_BLOCK */ /* Run kernel */ hipLaunchKernelGGL(( geometric_variation) , dim3(dimGrid), dim3(dimBlock), blockSharedMemorySize , 0, deviceHits, deviceSeeds, width, height, levels, weight); hipDeviceSynchronize(); checkLastCudaError(); /* Free memory on the GPU */ checkCuda(hipFree(deviceHits)); #endif /* MULTI_BLOCK */ /* Copy results from GPU and free memory */ checkCuda(hipMemcpy(seeds, deviceSeeds, size * sizeof(int), hipMemcpyDeviceToHost)); checkCuda(hipFree(deviceSeeds)); } static void printDevProp(const hipDeviceProp_t *devProp) { fprintf(stderr, "Revision number: %d.%d\n", devProp->major, devProp->minor); fprintf(stderr, "Name: %s\n", devProp->name); fprintf(stderr, "Total global memory: %" PRIu64 " bytes\n", devProp->totalGlobalMem); fprintf(stderr, "Total constant memory: %" PRIu64 " bytes\n", devProp->totalConstMem); fprintf(stderr, "L2 cache size: %u bytes\n", devProp->l2CacheSize); fprintf(stderr, "Maximum threads per block: %d\n", devProp->maxThreadsPerBlock); fprintf(stderr, "Shared memory per block: %" PRIu64 " bytes\n", devProp->sharedMemPerBlock); fprintf(stderr, "Registers per block: %d\n", devProp->regsPerBlock); fprintf(stderr, "Maximum threads per multiprocessor: %d\n", devProp->maxThreadsPerMultiProcessor); fprintf(stderr, "Shared mem per multiprocessor: %" PRIu64 " bytes\n", devProp->sharedMemPerMultiprocessor); fprintf(stderr, "Registers per multiprocessor: %d\n", devProp->regsPerMultiprocessor); fprintf(stderr, "Warp size: %d\n", 
devProp->warpSize); fprintf(stderr, "Maximum memory pitch: %" PRIu64 " bytes\n", devProp->memPitch); for (int i = 0; i < 3; ++i) fprintf(stderr, "Maximum dimension %d of block: %d\n", i, devProp->maxThreadsDim[i]); for (int i = 0; i < 3; ++i) fprintf(stderr, "Maximum dimension %d of grid: %d\n", i, devProp->maxGridSize[i]); fprintf(stderr, "Global memory bus width: %d bits\n", devProp->memoryBusWidth); fprintf(stderr, "Peak memory clock frequency: %d kHz\n", devProp->memoryClockRate); fprintf(stderr, "Clock rate: %d kHz\n", devProp->clockRate); fprintf(stderr, "Texture alignment: %" PRIu64 "\n", devProp->textureAlignment); fprintf(stderr, "Texture pitch alignment: %" PRIu64 "\n", devProp->texturePitchAlignment); fprintf(stderr, "Concurrent kernels: %s\n", devProp->concurrentKernels ? "Yes" : "No"); fprintf(stderr, "Concurrent copy and execution: %s\n", devProp->deviceOverlap ? "Yes" : "No"); fprintf(stderr, "Number of async engines: %d\n", devProp->asyncEngineCount); fprintf(stderr, "Number of multiprocessors: %d\n", devProp->multiProcessorCount); fprintf(stderr, "Kernel execution timeout: %s\n", devProp->kernelExecTimeoutEnabled ? "Yes" : "No"); fprintf(stderr, "Unified addressing with host: %s\n", devProp->unifiedAddressing ? "Yes" : "No"); fprintf(stderr, "Device can map host memory: %s\n", devProp->canMapHostMemory ? "Yes" : "No"); fprintf(stderr, "Device supports managed memory: %s\n", devProp->managedMemory ? "Yes" : "No"); return; } void printCUDAProp() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); fprintf(stderr, "CUDA Device Query...\n"); fprintf(stderr, "There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties fprintf(stderr, "\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(&devProp); } } #ifdef __cplusplus } #endif
2a0d3191fab0cae456a0bb5608fcdffc5c7e9d06.cu
/* * cuda_adaptive_seeding.cu - routines for adaptive seeding on GPUs. */ #include "accelerad_copyright.h" #include <stdio.h> #include <stdlib.h> #define __STDC_FORMAT_MACROS #include <inttypes.h> #include "kmeans.h" //#define PRINT_CUDA #define MULTI_BLOCK #define VALID_HORIZONTAL 0x01 /* Horizontal neighbor quad tree node is valid. */ #define VALID_VERTICAL 0x10 /* Vertical neighbor quad tree node is valid. */ #ifdef CAP_REGISTERS_PER_THREAD #include "accelerad.h" /* This is the maximum number of registers used by any cuda kernel in this in this file, found by using the flag "-Xptxas -v" to compile in nvcc. This should be updated when changes are made to the kernels. */ #ifdef RTX #define REGISTERS_PER_THREAD 36 /* Registers per thread under CUDA 10.0 */ #else #define REGISTERS_PER_THREAD 23 /* Registers per thread under CUDA 7.5 */ #endif #endif #ifdef __cplusplus extern "C" { #endif // Ambient sample distribution based on Wang et al. (2009) "An efficient GPU-based approach for interactive global illumination" __device__ inline static PointDirection average_point_direction(const PointDirection& a, const PointDirection& b, const PointDirection& c, const PointDirection& d) { PointDirection average; average.pos = (a.pos + b.pos + c.pos + d.pos) / 4.0f; //average.dir = optix::normalize(a.dir + b.dir + c.dir + d.dir); average.dir = a.dir + b.dir + c.dir + d.dir; const float length = optix::length(average.dir); if ( length > 0.0f ) average.dir /= length; return average; } __device__ inline static float geometric_error(const PointDirection& a, const PointDirection& b, const float alpha) { return alpha * optix::length(a.pos - b.pos) + sqrtf(2.0f * fmaxf(1.0f - optix::dot(a.dir, b.dir), 0.0f)); } __device__ inline static unsigned int valid_hit(const PointDirection& hit) { return optix::dot(hit.dir, hit.dir) > 0.0f && optix::dot(hit.pos, hit.pos) >= 0.0f; } static int CCALL isPowerOfTwo(unsigned int x) { return ((x != 0) && !(x & (x - 1))); } static unsigned int CCALL 
calc_block_dim(const unsigned int maxThreadsPerBlock, const unsigned int levels) { unsigned int blockDim = 1u; unsigned int size = maxThreadsPerBlock << 1; while ( size >>= 2 ) blockDim <<= 1; if ( blockDim > (1u << levels) ) blockDim = 1u << levels; return blockDim; } #ifndef MULTI_BLOCK __device__ inline static void reduce(float *error, const int level, const int idX, const int idY, const int width) { int tid = idX + idY * width; unsigned int stride = 1u; float err = error[tid]; for (int i = 0; i < level; i++) { unsigned int stride2 = stride << 1; if (!(idX % stride2) && !(idY % stride2)) { err += error[tid + stride]; err += error[tid + stride * width]; err += error[tid + stride * (width + 1)]; error[tid] = err; } stride = stride2; __syncthreads(); } } // Ambient sample distribution __global__ static void geometric_variation(PointDirection *deviceHits, int *seed, const unsigned int width, const unsigned int height, const unsigned int levels, const float alpha) { extern __shared__ PointDirection blockSharedMemory[]; unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int sid = blockDim.x * threadIdx.y + threadIdx.x; float *err = (float*)malloc(levels * sizeof(float)); unsigned int stride = 1u; PointDirection hit; unsigned int valid = idX < width && idY < height; if (valid) { hit = deviceHits[tid]; valid = valid_hit(hit); } if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 0.0f; PointDirection accum = hit; blockSharedMemory[sid] = hit; #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g, %g,%g,%g, valid=%i\n", width, height, accum.pos.x, accum.pos.y, accum.pos.z, accum.dir.x, accum.dir.y, accum.dir.z, valid); #endif __syncthreads(); /* Calculate geometric error for each hit point to each quad-tree node. 
*/ for (int i = 0; i < levels; i++) { #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits stride=%i, accum=%g,%g,%g\n", stride, accum.pos.x, accum.pos.y, accum.pos.z); #endif unsigned int stride2 = stride << 1; if (!(idX % stride2) && !(idY % stride2)) { accum = average_point_direction( accum, blockSharedMemory[sid + stride], blockSharedMemory[sid + stride * width], blockSharedMemory[sid + stride * (width + 1)] ); blockSharedMemory[sid] = accum; } __syncthreads(); err[i] = valid ? geometric_error(hit, blockSharedMemory[sid - idX % stride2 - (idY % stride2) * blockDim.x], alpha) : 0.0f; stride = stride2; } __syncthreads(); float *error = (float *)blockSharedMemory; for (int i = levels; i--; ) { unsigned int stride2 = stride >> 1; /* Calculate geometric error average at each quad-tree node. */ error[tid] = err[i]; __syncthreads(); if (i) reduce(error, i, idX, idY, width); // sum errors at this quad tree node /* Divide the pool proportinally to error at each quad-tree node. */ if (idX < width && idY < height && !(idX % stride) && !(idY % stride)) { valid = 0u; if (idX + stride2 < width) valid |= VALID_HORIZONTAL; if (idY + stride2 < height) valid |= VALID_VERTICAL; float err[4]; err[0] = error[tid]; err[1] = (valid2 & VALID_HORIZONTAL) ? error[tid + stride2] : 0.0f; err[2] = (valid2 & VALID_VERTICAL) ? error[tid + stride2 * width] : 0.0f; err[3] = (valid2 & (VALID_HORIZONTAL | VALID_VERTICAL)) == (VALID_HORIZONTAL | VALID_VERTICAL) ? error[tid + stride2 * (width + 1)] : 0.0f; float errSum = err[0] + err[1] + err[2] + err[3]; int seedSum = seed[tid]; float scoreSum = errSum > 0.0f ? 
seedSum / errSum : 0.0f; int s[4]; s[0] = scoreSum * err[0]; s[1] = scoreSum * err[1]; s[2] = scoreSum * err[2]; s[3] = scoreSum * err[3]; int diff = seedSum - s[0] - s[1] - s[2] - s[3]; #ifdef PRINT_CUDA if (!tid) printf("calc_score stride=%i, i=%i, errSum=%g, seedSum=%i, scoreSum=%g, diff=%i\n", stride, i, errSum, seedSum, scoreSum, diff); #endif if (diff && errSum > 0.0f) { float max[3] = { 0.0f, 0.0f, 0.0f }; // Will store up to 3 maximum values in err[] int maxi[3] = { -1, -1, -1 }; // Will store the indices of up to 3 maximum values in err[] for (int j = 0; j < 4; j++) { // Find 3 largest values if (err[j] > max[0]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = max[0]; maxi[1] = maxi[0]; max[0] = err[j]; maxi[0] = j; } else if (err[j] > max[1]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = err[j]; maxi[1] = j; } else if (err[j] > max[2]) { max[2] = err[j]; maxi[2] = j; } } if (diff > 2 && max[2] > 0.0f) { s[maxi[2]] += 1; diff -= 1; } if (diff > 1 && max[1] > 0.0f) { s[maxi[1]] += 1; diff -= 1; } if (diff && max[0] > 0.0f) { s[maxi[0]] += diff; } } seed[tid] = s[0]; if (valid & VALID_HORIZONTAL) seed[tid + stride2] = s[1]; if (valid & VALID_VERTICAL) { seed[tid + stride2 * width] = s[2]; if (valid & VALID_HORIZONTAL) seed[tid + stride2 * (width + 1)] = s[3]; } } __syncthreads(); stride = stride2; } free(err); } #else /* MULTI_BLOCK */ __global__ static void mip_map_hits(PointDirection *deviceHits, PointDirection *deviceMipMap, const unsigned int width, const unsigned int height) { extern __shared__ PointDirection blockSharedMemory[]; unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int sid = blockDim.x * threadIdx.y + threadIdx.x; unsigned int offset = 0u; unsigned int stride = 1u; unsigned int levelWidth = width; unsigned int levelHeight = height; PointDirection hit; unsigned int valid = idX < width && idY < height; if (valid) { hit = 
deviceHits[tid]; valid = valid_hit(hit); } if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 0.0f; PointDirection accum = hit; blockSharedMemory[sid] = hit; #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g, %g,%g,%g, valid=%i\n", width, height, accum.pos.x, accum.pos.y, accum.pos.z, accum.dir.x, accum.dir.y, accum.dir.z, valid); #endif __syncthreads(); /* Calculate geometric error for each hit point to each quad-tree node. */ while (stride < blockDim.x) { #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits stride=%i, offset=%i, accum=%g,%g,%g\n", stride, offset, accum.pos.x, accum.pos.y, accum.pos.z); #endif unsigned int stride2 = stride << 1; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; if (!(idX % stride2) && !(idY % stride2)) { accum = average_point_direction( accum, blockSharedMemory[sid + stride], blockSharedMemory[sid + stride * blockDim.x], blockSharedMemory[sid + stride * (blockDim.x + 1)] ); blockSharedMemory[sid] = accum; deviceMipMap[offset + (idX + idY * levelWidth) / stride2] = accum; } #ifdef PRINT_CUDA if (!tid) printf("mip_map_hits width=%i, height=%i, accum=%g,%g,%g\n", levelWidth, levelHeight, accum.pos.x, accum.pos.y, accum.pos.z); #endif __syncthreads(); stride = stride2; offset += levelWidth * levelHeight; } } __global__ static void calc_error(PointDirection *deviceHits, PointDirection *deviceMipMap, float *error, const unsigned int width, const unsigned int height, const unsigned int levels, float alpha) { unsigned int idX = blockDim.x * blockIdx.x + threadIdx.x; unsigned int idY = blockDim.y * blockIdx.y + threadIdx.y; unsigned int tid = idX + idY * width; unsigned int stride = 1u; unsigned int levelWidth = width; unsigned int levelHeight = height; if (idX < width && idY < height) { PointDirection hit = deviceHits[tid]; unsigned int valid = valid_hit(hit); if (!valid) hit.pos.x = hit.pos.y = hit.pos.z = hit.dir.x = hit.dir.y = hit.dir.z = 
0.0f; PointDirection *mipMapLevel = deviceMipMap; /* Calculate geometric error for each hit point to each quad-tree node. */ for (unsigned int i = 0u; i < levels; i++) { #ifdef PRINT_CUDA if (!tid) printf("calc_error stride=%i, i=%i, valid=%i\n", stride, i, valid); #endif stride <<= 1; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; error[tid + i * width * height] = valid ? geometric_error(hit, mipMapLevel[idX / stride + idY / stride * levelWidth], alpha) : 0.0f; mipMapLevel += levelWidth * levelHeight; } } } __global__ static void reduce_error(float *error, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int scale) { unsigned int idX = scale * (blockDim.x * blockIdx.x + threadIdx.x); unsigned int idY = scale * (blockDim.y * blockIdx.y + threadIdx.y); unsigned int tid = idX + idY * width; unsigned int valid = idX < width && idY < height; for (unsigned int j = 1u; j < levels; j++) { tid += width * height; float err = valid ? 
error[tid] : 0.0f; unsigned int stride = scale; while (stride < (scale << j) && stride < blockDim.x * scale) { #ifdef PRINT_CUDA if (!(tid % (width * height))) printf("reduce_error stride=%i, j=%i, scale=%i, err=%g\n", stride, j, scale, err); #endif unsigned int stride2 = stride << 1; if (valid && !(idX % stride2) && !(idY % stride2)) { if (idX + stride < width) err += error[tid + stride]; if (idY + stride < height) { err += error[tid + stride * width]; if (idX + stride < width) err += error[tid + stride * (width + 1)]; } error[tid] = err; } stride = stride2; __syncthreads(); } } } __global__ static void calc_score(float *error, int *seed, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int scale) { unsigned int idX = scale * (blockDim.x * blockIdx.x + threadIdx.x); unsigned int idY = scale * (blockDim.y * blockIdx.y + threadIdx.y); unsigned int tid = idX + idY * width; unsigned int valid = idX < width && idY < height; unsigned int stride = scale << levels; for (int i = levels; i--; ) { unsigned int stride2 = stride >> 1; /* Divide the pool proportinally to error at each quad-tree node. */ if (valid && !(idX % stride) && !(idY % stride)) { unsigned int valid2 = 0u; if (idX + stride2 < width) valid2 |= VALID_HORIZONTAL; if (idY + stride2 < height) valid2 |= VALID_VERTICAL; unsigned int lid = tid + width * height * i; float err[4]; err[0] = error[lid]; err[1] = (valid2 & VALID_HORIZONTAL) ? error[lid + stride2] : 0.0f; err[2] = (valid2 & VALID_VERTICAL) ? error[lid + stride2 * width] : 0.0f; err[3] = (valid2 & (VALID_HORIZONTAL | VALID_VERTICAL)) == (VALID_HORIZONTAL | VALID_VERTICAL) ? error[lid + stride2 * (width + 1)] : 0.0f; float errSum = err[0] + err[1] + err[2] + err[3]; int seedSum = seed[tid]; float scoreSum = errSum > 0.0f ? 
seedSum / errSum : 0.0f; int s[4]; s[0] = scoreSum * err[0]; s[1] = scoreSum * err[1]; s[2] = scoreSum * err[2]; s[3] = scoreSum * err[3]; int diff = seedSum - s[0] - s[1] - s[2] - s[3]; #ifdef PRINT_CUDA if (!tid) printf("calc_score stride=%i, i=%i, tid=%i, lid=%i, scale=%i, errSum=%g, seedSum=%i, scoreSum=%g, diff=%i\n", stride, i, tid, lid, scale, errSum, seedSum, scoreSum, diff); #endif if (diff && errSum > 0.0f) { float max[3] = { 0.0f, 0.0f, 0.0f }; // Will store up to 3 maximum values in err[] int maxi[3] = { -1, -1, -1 }; // Will store the indices of up to 3 maximum values in err[] for (int j = 0; j < 4; j++) { // Find 3 largest values if (err[j] > max[0]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = max[0]; maxi[1] = maxi[0]; max[0] = err[j]; maxi[0] = j; } else if (err[j] > max[1]) { max[2] = max[1]; maxi[2] = maxi[1]; max[1] = err[j]; maxi[1] = j; } else if (err[j] > max[2]) { max[2] = err[j]; maxi[2] = j; } } if (diff > 2 && max[2] > 0.0f) { s[maxi[2]] += 1; diff -= 1; } if (diff > 1 && max[1] > 0.0f) { s[maxi[1]] += 1; diff -= 1; } if (diff && max[0] > 0.0f) { s[maxi[0]] += diff; } } seed[tid] = s[0]; if (valid2 & VALID_HORIZONTAL) seed[tid + stride2] = s[1]; if (valid2 & VALID_VERTICAL) { seed[tid + stride2 * width] = s[2]; if (valid2 & VALID_HORIZONTAL) seed[tid + stride2 * (width + 1)] = s[3]; } } __syncthreads(); stride = stride2; } } /* Calculate average of hits at each quad tree node */ static void CCALL cuda_mip_map_hits_recursive(PointDirection *deviceHits, PointDirection *deviceMipMap, const unsigned int width, const unsigned int height, const unsigned int levels, const unsigned int maxThreadsPerBlock, dim3 dimGrid, dim3 dimBlock, size_t blockSharedMemorySize) { /* Calculate average of hits at each quad tree node */ mip_map_hits <<< dimGrid, dimBlock, blockSharedMemorySize >>> (deviceHits, deviceMipMap, width, height); cudaDeviceSynchronize(); checkLastCudaError(); if ( dimBlock.x < (1u << levels) ) { unsigned int complete = 1u; unsigned int 
offset = 0u; unsigned int levelWidth = (width - 1) / 2 + 1; unsigned int levelHeight = (height - 1) / 2 + 1; for (unsigned int i = 1u; i < dimBlock.x / 2u; i <<= 1) { complete++; offset += levelWidth * levelHeight; levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; } const unsigned int blockDim = calc_block_dim(maxThreadsPerBlock, levels - complete); const unsigned int blocksX = (levelWidth - 1) / blockDim + 1; const unsigned int blocksY = (levelHeight - 1) / blockDim + 1; const dim3 dimSuperGrid(blocksX, blocksY); const dim3 dimSuperBlock(blockDim, blockDim); #ifdef PRINT_CUDA fprintf(stderr, "cuda_mip_map_hits_recursive: offset %i, width %i, height %i, levels %i\n", offset, levelWidth, levelHeight, levels - complete); #endif cuda_mip_map_hits_recursive(deviceMipMap + offset, deviceMipMap + offset + levelWidth * levelHeight, levelWidth, levelHeight, levels - complete, maxThreadsPerBlock, dimSuperGrid, dimSuperBlock, dimSuperBlock.x * dimSuperBlock.y * sizeof(PointDirection)); } } /* Calculate average geometric variation for each quad tree node */ static void CCALL cuda_score_hits_recursive(float *deviceError, int *deviceSeeds, const unsigned int width, const unsigned int height, unsigned int levels, const unsigned int scale, const unsigned int maxThreadsPerBlock, dim3 dimGrid, dim3 dimBlock) { /* Perform reduction on error */ reduce_error <<< dimGrid, dimBlock, 0 >>> (deviceError, width, height, levels, scale); cudaDeviceSynchronize(); checkLastCudaError(); /* Recruse if block not large enough for reduction */ if ( dimBlock.x < (1u << levels) ) { unsigned int complete = 0u; for (unsigned int i = 1u; i < dimBlock.x; i <<= 1) complete++; const unsigned int blockDim = calc_block_dim(maxThreadsPerBlock, levels - complete); const unsigned int blocksX = (dimGrid.x - 1) / blockDim + 1; const unsigned int blocksY = (dimGrid.y - 1) / blockDim + 1; const dim3 dimSuperGrid(blocksX, blocksY); const dim3 dimSuperBlock(blockDim, blockDim); 
cuda_score_hits_recursive(deviceError + width * height * complete, deviceSeeds, width, height, levels - complete, scale * dimBlock.x, maxThreadsPerBlock, dimSuperGrid, dimSuperBlock); levels = complete; } /* Calculate score for each leaf node based on error */ calc_score <<< dimGrid, dimBlock, 0 >>> (deviceError, deviceSeeds, width, height, levels, scale); cudaDeviceSynchronize(); checkLastCudaError(); } #endif /* MULTI_BLOCK */ /* Score the relative need for an irradiance cache entry at each hit point */ void CCALL cuda_score_hits(PointDirection *hits, int *seeds, const unsigned int width, const unsigned int height, const float weight, const unsigned int seed_count) { PointDirection *deviceHits; #ifdef MULTI_BLOCK PointDirection *deviceMipMap; float *deviceError; #endif int *deviceSeeds; /* Calculate number of levels */ unsigned int levels = 0; unsigned int size = width > height ? width : height; if ( !isPowerOfTwo(size) ) levels++; while ( size >>= 1 ) levels++; /* Determine block size */ cudaDeviceProp deviceProp; int deviceNum; cudaGetDevice(&deviceNum); cudaGetDeviceProperties(&deviceProp, deviceNum); #ifdef CAP_REGISTERS_PER_THREAD const unsigned int registersPerBlock = deviceProp.regsPerBlock; unsigned int threadsPerBlock = deviceProp.maxThreadsPerBlock; while (registersPerBlock / threadsPerBlock < REGISTERS_PER_THREAD) threadsPerBlock >>= 1; #else const unsigned int threadsPerBlock = deviceProp.maxThreadsPerBlock; #endif /* To support reduction, blockDim *must* be a power of two. */ const unsigned int blockDim = calc_block_dim(threadsPerBlock, levels); const unsigned int blocksX = (width - 1) / blockDim + 1; const unsigned int blocksY = (height - 1) / blockDim + 1; const size_t blockSharedMemorySize = blockDim * blockDim * sizeof(PointDirection); #ifndef MULTI_BLOCK if (blocksX != 1u || blocksY != 1u) err("Your CUDA hardware has insufficient block size %u threads (%u x %u blocks needed). 
Recompile with MULTI_BLOCK flag.", deviceProp.maxThreadsPerBlock, blocksX, blocksY); #endif if (blockSharedMemorySize > deviceProp.sharedMemPerBlock) err("Your CUDA hardware has insufficient block shared memory %" PRIu64 " (%" PRIu64 " needed).", deviceProp.sharedMemPerBlock, blockSharedMemorySize); const dim3 dimGrid(blocksX, blocksY); const dim3 dimBlock(blockDim, blockDim); #ifdef PRINT_CUDA fprintf(stderr, "Adaptive sampling: Block %i x %i, Grid %i x %i, Shared %i, Levels %i, Weight %g\n", blockDim, blockDim, blocksX, blocksY, blockSharedMemorySize, levels, weight); #endif /* Allocate memory and copy hits to the GPU */ size = width * height; checkCuda(cudaMalloc(&deviceHits, size * sizeof(PointDirection))); checkCuda(cudaMemcpy(deviceHits, hits, size * sizeof(PointDirection), cudaMemcpyHostToDevice)); #ifdef MULTI_BLOCK /* Allocate memory on the GPU */ unsigned int mipMapSize = 0u; unsigned int levelWidth = width; unsigned int levelHeight = height; while (levelWidth > 1u || levelHeight > 1u) { levelWidth = (levelWidth - 1) / 2 + 1; levelHeight = (levelHeight - 1) / 2 + 1; mipMapSize += levelWidth * levelHeight; } checkCuda(cudaMalloc(&deviceMipMap, mipMapSize * sizeof(PointDirection))); // Storage requirement for mip map is 1/3 or original data checkCuda(cudaMalloc(&deviceError, size * levels * sizeof(float))); /* Calculate average of hits at each quad tree node */ cuda_mip_map_hits_recursive(deviceHits, deviceMipMap, width, height, levels, threadsPerBlock, dimGrid, dimBlock, blockSharedMemorySize); /* Calculate geometric variation at each quad tree node */ calc_error <<< dimGrid, dimBlock, 0 >>> (deviceHits, deviceMipMap, deviceError, width, height, levels, weight); cudaDeviceSynchronize(); checkLastCudaError(); /* Free memory on the GPU */ checkCuda(cudaFree(deviceHits)); checkCuda(cudaFree(deviceMipMap)); #endif /* MULTI_BLOCK */ /* Allocate memory and copy first seed to the GPU */ seeds[0] = seed_count; #ifdef PRINT_CUDA fprintf(stderr, "Target total score: 
%i\n", seed_count); #endif checkCuda(cudaMalloc(&deviceSeeds, size * sizeof(int))); checkCuda(cudaMemcpy(deviceSeeds, seeds, sizeof(int), cudaMemcpyHostToDevice)); // transfer only first entry #ifdef MULTI_BLOCK /* Calculate average geometric variation for each quad tree node */ cuda_score_hits_recursive(deviceError, deviceSeeds, width, height, levels, 1u, threadsPerBlock, dimGrid, dimBlock); /* Free memory on the GPU */ checkCuda(cudaFree(deviceError)); #else /* MULTI_BLOCK */ /* Run kernel */ geometric_variation <<< dimGrid, dimBlock, blockSharedMemorySize >>> (deviceHits, deviceSeeds, width, height, levels, weight); cudaDeviceSynchronize(); checkLastCudaError(); /* Free memory on the GPU */ checkCuda(cudaFree(deviceHits)); #endif /* MULTI_BLOCK */ /* Copy results from GPU and free memory */ checkCuda(cudaMemcpy(seeds, deviceSeeds, size * sizeof(int), cudaMemcpyDeviceToHost)); checkCuda(cudaFree(deviceSeeds)); } static void printDevProp(const cudaDeviceProp *devProp) { fprintf(stderr, "Revision number: %d.%d\n", devProp->major, devProp->minor); fprintf(stderr, "Name: %s\n", devProp->name); fprintf(stderr, "Total global memory: %" PRIu64 " bytes\n", devProp->totalGlobalMem); fprintf(stderr, "Total constant memory: %" PRIu64 " bytes\n", devProp->totalConstMem); fprintf(stderr, "L2 cache size: %u bytes\n", devProp->l2CacheSize); fprintf(stderr, "Maximum threads per block: %d\n", devProp->maxThreadsPerBlock); fprintf(stderr, "Shared memory per block: %" PRIu64 " bytes\n", devProp->sharedMemPerBlock); fprintf(stderr, "Registers per block: %d\n", devProp->regsPerBlock); fprintf(stderr, "Maximum threads per multiprocessor: %d\n", devProp->maxThreadsPerMultiProcessor); fprintf(stderr, "Shared mem per multiprocessor: %" PRIu64 " bytes\n", devProp->sharedMemPerMultiprocessor); fprintf(stderr, "Registers per multiprocessor: %d\n", devProp->regsPerMultiprocessor); fprintf(stderr, "Warp size: %d\n", devProp->warpSize); fprintf(stderr, "Maximum memory pitch: %" PRIu64 " 
bytes\n", devProp->memPitch); for (int i = 0; i < 3; ++i) fprintf(stderr, "Maximum dimension %d of block: %d\n", i, devProp->maxThreadsDim[i]); for (int i = 0; i < 3; ++i) fprintf(stderr, "Maximum dimension %d of grid: %d\n", i, devProp->maxGridSize[i]); fprintf(stderr, "Global memory bus width: %d bits\n", devProp->memoryBusWidth); fprintf(stderr, "Peak memory clock frequency: %d kHz\n", devProp->memoryClockRate); fprintf(stderr, "Clock rate: %d kHz\n", devProp->clockRate); fprintf(stderr, "Texture alignment: %" PRIu64 "\n", devProp->textureAlignment); fprintf(stderr, "Texture pitch alignment: %" PRIu64 "\n", devProp->texturePitchAlignment); fprintf(stderr, "Concurrent kernels: %s\n", devProp->concurrentKernels ? "Yes" : "No"); fprintf(stderr, "Concurrent copy and execution: %s\n", devProp->deviceOverlap ? "Yes" : "No"); fprintf(stderr, "Number of async engines: %d\n", devProp->asyncEngineCount); fprintf(stderr, "Number of multiprocessors: %d\n", devProp->multiProcessorCount); fprintf(stderr, "Kernel execution timeout: %s\n", devProp->kernelExecTimeoutEnabled ? "Yes" : "No"); fprintf(stderr, "Unified addressing with host: %s\n", devProp->unifiedAddressing ? "Yes" : "No"); fprintf(stderr, "Device can map host memory: %s\n", devProp->canMapHostMemory ? "Yes" : "No"); fprintf(stderr, "Device supports managed memory: %s\n", devProp->managedMemory ? "Yes" : "No"); return; } void printCUDAProp() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); fprintf(stderr, "CUDA Device Query...\n"); fprintf(stderr, "There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties fprintf(stderr, "\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(&devProp); } } #ifdef __cplusplus } #endif
3b6aa0ec119650b9f5ea1988c1c20f8f6613fd14.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cstdio> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "rawtodigi_cuda.h" #include "cudaCheck.h" namespace cuda { class Packing { public: using PackedDigiType = uint32_t; // Constructor: pre-computes masks and shifts from field widths __host__ __device__ inline constexpr Packing(unsigned int row_w, unsigned int column_w, unsigned int time_w, unsigned int adc_w) : row_width(row_w), column_width(column_w), adc_width(adc_w), row_shift(0), column_shift(row_shift + row_w), time_shift(column_shift + column_w), adc_shift(time_shift + time_w), row_mask(~(~0U << row_w)), column_mask( ~(~0U << column_w)), time_mask(~(~0U << time_w)), adc_mask(~(~0U << adc_w)), rowcol_mask(~(~0U << (column_w+row_w))), max_row(row_mask), max_column(column_mask), max_adc(adc_mask) { } uint32_t row_width; uint32_t column_width; uint32_t adc_width; uint32_t row_shift; uint32_t column_shift; uint32_t time_shift; uint32_t adc_shift; PackedDigiType row_mask; PackedDigiType column_mask; PackedDigiType time_mask; PackedDigiType adc_mask; PackedDigiType rowcol_mask; uint32_t max_row; uint32_t max_column; uint32_t max_adc; }; __host__ __device__ inline constexpr Packing packing() { return Packing(11, 11, 0, 10); } __host__ __device__ inline uint32_t pack(uint32_t row, uint32_t col, uint32_t adc) { constexpr Packing thePacking = packing(); adc = ::min(adc, thePacking.max_adc); return (row << thePacking.row_shift) | (col << thePacking.column_shift) | (adc << thePacking.adc_shift); } __host__ __device__ uint32_t getLink(uint32_t ww) { return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask); } __host__ __device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift ) & pixelgpudetails::ROC_mask); } __host__ __device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); } __host__ __device__ bool 
isBarrel(uint32_t rawId) { return (1==((rawId>>25)&0x7)); } __host__ __device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) { constexpr uint32_t numRowsInRoc = 80; constexpr uint32_t numColsInRoc = 52; /// row and collumn in ROC representation return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc)); } __host__ __device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); } __host__ __device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU * cablingMap, uint8_t fed, uint32_t link, uint32_t roc) { uint32_t index = fed * pixelgpudetails::MAX_LINK * pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + roc; pixelgpudetails::DetIdGPU detId = { cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index] }; return detId; } __host__ __device__ pixelgpudetails::Pixel frameConversion(bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) { int slopeRow = 0, slopeCol = 0; int rowOffset = 0, colOffset = 0; if (bpix) { if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } // if roc } else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1 if (rocIdInDetUnit < 8) { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc; } else { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (16-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } } } else { // fpix if (side==-1) { // pannel 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = 
(8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } } else { // pannel 2 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } } // side } uint32_t gRow = rowOffset+slopeRow*local.row; uint32_t gCol = colOffset+slopeCol*local.col; //printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol); pixelgpudetails::Pixel global = {gRow, gCol}; return global; } __host__ __device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) { // debug = true; if(debug) { switch (status) { case(1) : { printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId ); break; } case(2) : { printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId); break; } case(3) : { printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId); break; } case(4) : { printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId); break; } default: if (debug) printf("Cabling check returned unexpected result, status = %i\n", status); }; } if(status >= 1 and status <= 4) { return status + 34; } return 0; } __host__ __device__ uint32_t getErrRawID(uint8_t fedId, uint32_t errWord, uint32_t errorType, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) { uint32_t rID = 0xffffffff; switch (errorType) { case 25 : case 30 : case 31 : case 36 : case 40 : { //set dummy values for cabling just to get detId from link //cabling.dcol = 0; //cabling.pxid = 2; constexpr uint32_t roc = 1; const uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; 
if (rID_temp != 9999) rID = rID_temp; break; } case 29 : { int chanNmbr = 0; constexpr int DB0_shift = 0; constexpr int DB1_shift = DB0_shift + 1; constexpr int DB2_shift = DB1_shift + 1; constexpr int DB3_shift = DB2_shift + 1; constexpr int DB4_shift = DB3_shift + 1; constexpr uint32_t DataBit_mask = ~(~uint32_t(0) << 1); const int CH1 = (errWord >> DB0_shift) & DataBit_mask; const int CH2 = (errWord >> DB1_shift) & DataBit_mask; const int CH3 = (errWord >> DB2_shift) & DataBit_mask; const int CH4 = (errWord >> DB3_shift) & DataBit_mask; const int CH5 = (errWord >> DB4_shift) & DataBit_mask; constexpr int BLOCK_bits = 3; constexpr int BLOCK_shift = 8; constexpr uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits); const int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask; const int localCH = 1*CH1+2*CH2+3*CH3+4*CH4+5*CH5; if (BLOCK%2==0) chanNmbr=(BLOCK/2)*9+localCH; else chanNmbr = ((BLOCK-1)/2)*9+4+localCH; if ((chanNmbr < 1)||(chanNmbr > 36)) break; // signifies unexpected result // set dummy values for cabling just to get detId from link if in Barrel //cabling.dcol = 0; //cabling.pxid = 2; constexpr uint32_t roc = 1; const uint32_t link = chanNmbr; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; if(rID_temp != 9999) rID = rID_temp; break; } case 37 : case 38: { //cabling.dcol = 0; //cabling.pxid = 2; const uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask; const uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; if(rID_temp != 9999) rID = rID_temp; break; } default: break; }; return rID; } __host__ __device__ uint8_t checkROC(uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) { uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask; if (errorType < 25) return 0; bool errorFound = false; switch 
(errorType) { case(25) : { errorFound = true; uint32_t index = fedId * pixelgpudetails::MAX_LINK * pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + 1; if (index > 1 && index <= cablingMap->size) { if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index])) errorFound = false; } if (debug and errorFound) printf("Invalid ROC = 25 found (errorType = 25)\n"); break; } case(26) : { if (debug) printf("Gap word found (errorType = 26)\n"); errorFound = true; break; } case(27) : { if (debug) printf("Dummy word found (errorType = 27)\n"); errorFound = true; break; } case(28) : { if (debug) printf("Error fifo nearly full (errorType = 28)\n"); errorFound = true; break; } case(29) : { if (debug) printf("Timeout on a channel (errorType = 29)\n"); if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) { if (debug) printf("...first errorType=29 error, this gets masked out\n"); } errorFound = true; break; } case(30) : { if (debug) printf("TBM error trailer (errorType = 30)\n"); int StateMatch_bits = 4; int StateMatch_shift = 8; uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits); int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask; if ( StateMatch != 1 && StateMatch != 8 ) { if (debug) printf("FED error 30 with unexpected State Bits (errorType = 30)\n"); } if (StateMatch == 1) errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30 errorFound = true; break; } case(31) : { if (debug) printf("Event number error (errorType = 31)\n"); errorFound = true; break; } default: errorFound = false; }; return errorFound ? 
errorType : 0; } __global__ void rawtodigi_kernel(const Input *input, Output *output, bool useQualityInfo, bool includeErrors, bool debug) { const SiPixelFedCablingMapGPU* cablingMap = &input->cablingMap; const uint32_t wordCounter = input->wordCounter; const uint32_t* word = input->word; const uint8_t* fedIds =input->fedId; uint16_t* xx = output->xx; uint16_t* yy = output->yy; uint16_t* adc = output->adc; uint32_t* pdigi = output->digi; uint32_t* rawIdArr = output->rawIdArr; uint16_t* moduleId = output->moduleInd; GPU::SimpleVector<PixelErrorCompact>* err = &output->err; int32_t first = threadIdx.x + blockIdx.x*blockDim.x; for (int32_t iloop=first, nend=wordCounter; iloop<nend; iloop+=blockDim.x*gridDim.x) { auto gIndex = iloop; xx[gIndex] = 0; yy[gIndex] = 0; adc[gIndex] = 0; bool skipROC = false; uint8_t fedId = fedIds[gIndex/2]; // +1200; // initialize (too many coninue below) pdigi[gIndex] = 0; rawIdArr[gIndex] = 0; moduleId[gIndex] = 9999; uint32_t ww = word[gIndex]; // Array containing 32 bit raw data if (ww == 0) { // 0 is an indicator of a noise/dead channel, skip these pixels during clusterization continue; } uint32_t link = getLink(ww); // Extract link uint32_t roc = getRoc(ww); // Extract Roc in link pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc); uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug); skipROC = (roc < pixelgpudetails::maxROCIndex) ? 
false : (errorType != 0); if (includeErrors and skipROC) { uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug); err->push_back(PixelErrorCompact{rID, ww, errorType, fedId}); continue; } uint32_t rawId = detId.RawId; uint32_t rocIdInDetUnit = detId.rocInDet; bool barrel = isBarrel(rawId); uint32_t index = fedId * pixelgpudetails::MAX_LINK * pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + roc; if (useQualityInfo) { skipROC = cablingMap->badRocs[index]; if (skipROC) continue; } uint32_t layer = 0;//, ladder =0; int side = 0, panel = 0, module = 0;//disk = 0, blade = 0 if (barrel) { layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask; module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask; side = (module < 5)? -1 : 1; } else { // endcap ids layer = 0; panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask; //disk = (rawId >> diskStartBit_) & diskMask_; side = (panel == 1)? -1 : 1; //blade = (rawId >> bladeStartBit_) & bladeMask_; } // ***special case of layer to 1 be handled here pixelgpudetails::Pixel localPix; if (layer == 1) { uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask; uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask; localPix.row = row; localPix.col = col; if (includeErrors) { if (not rocRowColIsValid(row, col)) { uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays err->push_back(PixelErrorCompact{rawId, ww, error, fedId}); if(debug) printf("BPIX1 Error status: %i\n", error); continue; } } } else { // ***conversion rules for dcol and pxid uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask; uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask; uint32_t row = pixelgpudetails::numRowsInRoc - pxid/2; uint32_t col = dcol*2 + pxid%2; localPix.row = row; localPix.col = col; if (includeErrors and not 
dcolIsValid(dcol, pxid)) { uint8_t error = conversionError(fedId, 3, debug); err->push_back(PixelErrorCompact{rawId, ww, error, fedId}); if(debug) printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc); continue; } } pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix); xx[gIndex] = globalPix.row; // origin shifting by 1 0-159 yy[gIndex] = globalPix.col; // origin shifting by 1 0-415 adc[gIndex] = getADC(ww); pdigi[gIndex] = pack(globalPix.row, globalPix.col, adc[gIndex]); moduleId[gIndex] = detId.moduleId; rawIdArr[gIndex] = rawId; } // end of loop (gIndex < end) } // end of Raw to Digi kernel void rawtodigi(const Input *input_d, Output *output_d, const uint32_t wordCounter, bool useQualityInfo, bool includeErrors, bool debug, hipStream_t stream) { const int threadsPerBlock = 512; const int blocks = (wordCounter + threadsPerBlock-1) /threadsPerBlock; // fill it all hipLaunchKernelGGL(( rawtodigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream, input_d, output_d, useQualityInfo, includeErrors, debug); cudaCheck(hipGetLastError()); } } // end namespace cuda
3b6aa0ec119650b9f5ea1988c1c20f8f6613fd14.cu
#include <algorithm> #include <cstdio> #include <cuda_runtime.h> #include <cuda.h> #include "rawtodigi_cuda.h" #include "cudaCheck.h" namespace cuda { class Packing { public: using PackedDigiType = uint32_t; // Constructor: pre-computes masks and shifts from field widths __host__ __device__ inline constexpr Packing(unsigned int row_w, unsigned int column_w, unsigned int time_w, unsigned int adc_w) : row_width(row_w), column_width(column_w), adc_width(adc_w), row_shift(0), column_shift(row_shift + row_w), time_shift(column_shift + column_w), adc_shift(time_shift + time_w), row_mask(~(~0U << row_w)), column_mask( ~(~0U << column_w)), time_mask(~(~0U << time_w)), adc_mask(~(~0U << adc_w)), rowcol_mask(~(~0U << (column_w+row_w))), max_row(row_mask), max_column(column_mask), max_adc(adc_mask) { } uint32_t row_width; uint32_t column_width; uint32_t adc_width; uint32_t row_shift; uint32_t column_shift; uint32_t time_shift; uint32_t adc_shift; PackedDigiType row_mask; PackedDigiType column_mask; PackedDigiType time_mask; PackedDigiType adc_mask; PackedDigiType rowcol_mask; uint32_t max_row; uint32_t max_column; uint32_t max_adc; }; __host__ __device__ inline constexpr Packing packing() { return Packing(11, 11, 0, 10); } __host__ __device__ inline uint32_t pack(uint32_t row, uint32_t col, uint32_t adc) { constexpr Packing thePacking = packing(); adc = std::min(adc, thePacking.max_adc); return (row << thePacking.row_shift) | (col << thePacking.column_shift) | (adc << thePacking.adc_shift); } __host__ __device__ uint32_t getLink(uint32_t ww) { return ((ww >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask); } __host__ __device__ uint32_t getRoc(uint32_t ww) { return ((ww >> pixelgpudetails::ROC_shift ) & pixelgpudetails::ROC_mask); } __host__ __device__ uint32_t getADC(uint32_t ww) { return ((ww >> pixelgpudetails::ADC_shift) & pixelgpudetails::ADC_mask); } __host__ __device__ bool isBarrel(uint32_t rawId) { return (1==((rawId>>25)&0x7)); } __host__ __device__ bool 
rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) { constexpr uint32_t numRowsInRoc = 80; constexpr uint32_t numColsInRoc = 52; /// row and collumn in ROC representation return ((rocRow < numRowsInRoc) & (rocCol < numColsInRoc)); } __host__ __device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); } __host__ __device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelFedCablingMapGPU * cablingMap, uint8_t fed, uint32_t link, uint32_t roc) { uint32_t index = fed * pixelgpudetails::MAX_LINK * pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + roc; pixelgpudetails::DetIdGPU detId = { cablingMap->RawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index] }; return detId; } __host__ __device__ pixelgpudetails::Pixel frameConversion(bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) { int slopeRow = 0, slopeCol = 0; int rowOffset = 0, colOffset = 0; if (bpix) { if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } // if roc } else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1 if (rocIdInDetUnit < 8) { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc; } else { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (16-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } } } else { // fpix if (side==-1) { // pannel 1 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 
2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } } else { // pannel 2 if (rocIdInDetUnit < 8) { slopeRow = 1; slopeCol = -1; rowOffset = 0; colOffset = (8-rocIdInDetUnit)*pixelgpudetails::numColsInRoc-1; } else { slopeRow = -1; slopeCol = 1; rowOffset = 2*pixelgpudetails::numRowsInRoc-1; colOffset = (rocIdInDetUnit-8)*pixelgpudetails::numColsInRoc; } } // side } uint32_t gRow = rowOffset+slopeRow*local.row; uint32_t gCol = colOffset+slopeCol*local.col; //printf("Inside frameConversion row: %u, column: %u\n", gRow, gCol); pixelgpudetails::Pixel global = {gRow, gCol}; return global; } __host__ __device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) { // debug = true; if(debug) { switch (status) { case(1) : { printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId ); break; } case(2) : { printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId); break; } case(3) : { printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId); break; } case(4) : { printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId); break; } default: if (debug) printf("Cabling check returned unexpected result, status = %i\n", status); }; } if(status >= 1 and status <= 4) { return status + 34; } return 0; } __host__ __device__ uint32_t getErrRawID(uint8_t fedId, uint32_t errWord, uint32_t errorType, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) { uint32_t rID = 0xffffffff; switch (errorType) { case 25 : case 30 : case 31 : case 36 : case 40 : { //set dummy values for cabling just to get detId from link //cabling.dcol = 0; //cabling.pxid = 2; constexpr uint32_t roc = 1; const uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; if (rID_temp != 9999) rID = rID_temp; break; } case 29 : { int chanNmbr = 0; constexpr int DB0_shift = 
0; constexpr int DB1_shift = DB0_shift + 1; constexpr int DB2_shift = DB1_shift + 1; constexpr int DB3_shift = DB2_shift + 1; constexpr int DB4_shift = DB3_shift + 1; constexpr uint32_t DataBit_mask = ~(~uint32_t(0) << 1); const int CH1 = (errWord >> DB0_shift) & DataBit_mask; const int CH2 = (errWord >> DB1_shift) & DataBit_mask; const int CH3 = (errWord >> DB2_shift) & DataBit_mask; const int CH4 = (errWord >> DB3_shift) & DataBit_mask; const int CH5 = (errWord >> DB4_shift) & DataBit_mask; constexpr int BLOCK_bits = 3; constexpr int BLOCK_shift = 8; constexpr uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits); const int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask; const int localCH = 1*CH1+2*CH2+3*CH3+4*CH4+5*CH5; if (BLOCK%2==0) chanNmbr=(BLOCK/2)*9+localCH; else chanNmbr = ((BLOCK-1)/2)*9+4+localCH; if ((chanNmbr < 1)||(chanNmbr > 36)) break; // signifies unexpected result // set dummy values for cabling just to get detId from link if in Barrel //cabling.dcol = 0; //cabling.pxid = 2; constexpr uint32_t roc = 1; const uint32_t link = chanNmbr; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; if(rID_temp != 9999) rID = rID_temp; break; } case 37 : case 38: { //cabling.dcol = 0; //cabling.pxid = 2; const uint32_t roc = (errWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ROC_mask; const uint32_t link = (errWord >> pixelgpudetails::LINK_shift) & pixelgpudetails::LINK_mask; const uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).RawId; if(rID_temp != 9999) rID = rID_temp; break; } default: break; }; return rID; } __host__ __device__ uint8_t checkROC(uint32_t errorWord, uint8_t fedId, uint32_t link, const SiPixelFedCablingMapGPU *cablingMap, bool debug = false) { uint8_t errorType = (errorWord >> pixelgpudetails::ROC_shift) & pixelgpudetails::ERROR_mask; if (errorType < 25) return 0; bool errorFound = false; switch (errorType) { case(25) : { errorFound = true; uint32_t index = fedId * pixelgpudetails::MAX_LINK * 
pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + 1; if (index > 1 && index <= cablingMap->size) { if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index])) errorFound = false; } if (debug and errorFound) printf("Invalid ROC = 25 found (errorType = 25)\n"); break; } case(26) : { if (debug) printf("Gap word found (errorType = 26)\n"); errorFound = true; break; } case(27) : { if (debug) printf("Dummy word found (errorType = 27)\n"); errorFound = true; break; } case(28) : { if (debug) printf("Error fifo nearly full (errorType = 28)\n"); errorFound = true; break; } case(29) : { if (debug) printf("Timeout on a channel (errorType = 29)\n"); if ((errorWord >> pixelgpudetails::OMIT_ERR_shift) & pixelgpudetails::OMIT_ERR_mask) { if (debug) printf("...first errorType=29 error, this gets masked out\n"); } errorFound = true; break; } case(30) : { if (debug) printf("TBM error trailer (errorType = 30)\n"); int StateMatch_bits = 4; int StateMatch_shift = 8; uint32_t StateMatch_mask = ~(~uint32_t(0) << StateMatch_bits); int StateMatch = (errorWord >> StateMatch_shift) & StateMatch_mask; if ( StateMatch != 1 && StateMatch != 8 ) { if (debug) printf("FED error 30 with unexpected State Bits (errorType = 30)\n"); } if (StateMatch == 1) errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30 errorFound = true; break; } case(31) : { if (debug) printf("Event number error (errorType = 31)\n"); errorFound = true; break; } default: errorFound = false; }; return errorFound ? 
errorType : 0; } __global__ void rawtodigi_kernel(const Input *input, Output *output, bool useQualityInfo, bool includeErrors, bool debug) { const SiPixelFedCablingMapGPU* cablingMap = &input->cablingMap; const uint32_t wordCounter = input->wordCounter; const uint32_t* word = input->word; const uint8_t* fedIds =input->fedId; uint16_t* xx = output->xx; uint16_t* yy = output->yy; uint16_t* adc = output->adc; uint32_t* pdigi = output->digi; uint32_t* rawIdArr = output->rawIdArr; uint16_t* moduleId = output->moduleInd; GPU::SimpleVector<PixelErrorCompact>* err = &output->err; int32_t first = threadIdx.x + blockIdx.x*blockDim.x; for (int32_t iloop=first, nend=wordCounter; iloop<nend; iloop+=blockDim.x*gridDim.x) { auto gIndex = iloop; xx[gIndex] = 0; yy[gIndex] = 0; adc[gIndex] = 0; bool skipROC = false; uint8_t fedId = fedIds[gIndex/2]; // +1200; // initialize (too many coninue below) pdigi[gIndex] = 0; rawIdArr[gIndex] = 0; moduleId[gIndex] = 9999; uint32_t ww = word[gIndex]; // Array containing 32 bit raw data if (ww == 0) { // 0 is an indicator of a noise/dead channel, skip these pixels during clusterization continue; } uint32_t link = getLink(ww); // Extract link uint32_t roc = getRoc(ww); // Extract Roc in link pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc); uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug); skipROC = (roc < pixelgpudetails::maxROCIndex) ? 
false : (errorType != 0); if (includeErrors and skipROC) { uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug); err->push_back(PixelErrorCompact{rID, ww, errorType, fedId}); continue; } uint32_t rawId = detId.RawId; uint32_t rocIdInDetUnit = detId.rocInDet; bool barrel = isBarrel(rawId); uint32_t index = fedId * pixelgpudetails::MAX_LINK * pixelgpudetails::MAX_ROC + (link-1) * pixelgpudetails::MAX_ROC + roc; if (useQualityInfo) { skipROC = cablingMap->badRocs[index]; if (skipROC) continue; } uint32_t layer = 0;//, ladder =0; int side = 0, panel = 0, module = 0;//disk = 0, blade = 0 if (barrel) { layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask; module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask; side = (module < 5)? -1 : 1; } else { // endcap ids layer = 0; panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask; //disk = (rawId >> diskStartBit_) & diskMask_; side = (panel == 1)? -1 : 1; //blade = (rawId >> bladeStartBit_) & bladeMask_; } // ***special case of layer to 1 be handled here pixelgpudetails::Pixel localPix; if (layer == 1) { uint32_t col = (ww >> pixelgpudetails::COL_shift) & pixelgpudetails::COL_mask; uint32_t row = (ww >> pixelgpudetails::ROW_shift) & pixelgpudetails::ROW_mask; localPix.row = row; localPix.col = col; if (includeErrors) { if (not rocRowColIsValid(row, col)) { uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays err->push_back(PixelErrorCompact{rawId, ww, error, fedId}); if(debug) printf("BPIX1 Error status: %i\n", error); continue; } } } else { // ***conversion rules for dcol and pxid uint32_t dcol = (ww >> pixelgpudetails::DCOL_shift) & pixelgpudetails::DCOL_mask; uint32_t pxid = (ww >> pixelgpudetails::PXID_shift) & pixelgpudetails::PXID_mask; uint32_t row = pixelgpudetails::numRowsInRoc - pxid/2; uint32_t col = dcol*2 + pxid%2; localPix.row = row; localPix.col = col; if (includeErrors and not 
dcolIsValid(dcol, pxid)) { uint8_t error = conversionError(fedId, 3, debug); err->push_back(PixelErrorCompact{rawId, ww, error, fedId}); if(debug) printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc); continue; } } pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix); xx[gIndex] = globalPix.row; // origin shifting by 1 0-159 yy[gIndex] = globalPix.col; // origin shifting by 1 0-415 adc[gIndex] = getADC(ww); pdigi[gIndex] = pack(globalPix.row, globalPix.col, adc[gIndex]); moduleId[gIndex] = detId.moduleId; rawIdArr[gIndex] = rawId; } // end of loop (gIndex < end) } // end of Raw to Digi kernel void rawtodigi(const Input *input_d, Output *output_d, const uint32_t wordCounter, bool useQualityInfo, bool includeErrors, bool debug, cudaStream_t stream) { const int threadsPerBlock = 512; const int blocks = (wordCounter + threadsPerBlock-1) /threadsPerBlock; // fill it all rawtodigi_kernel<<<blocks, threadsPerBlock, 0, stream>>>(input_d, output_d, useQualityInfo, includeErrors, debug); cudaCheck(cudaGetLastError()); } } // end namespace cuda
62fc659678d7c0eabbebdb5e27d22f78e88369be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include <cmath> #include "caffe/layers/label_specific_hard_margin.hpp" namespace caffe { template <typename Dtype> __global__ void LabelSpecificHardMarginForward(const int num, const int dim, const Dtype* bottom_data, const Dtype* label, Dtype* row_sum, Dtype* top_data, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num) { int gt = static_cast<int>(label[index]); row_sum[index] = (row_sum[index] - bottom_data[index * dim + gt]) / (dim - 1); top_data[index * dim + gt] = bottom_data[index * dim + gt] * positive_weight + row_sum[index] * (1 - positive_weight); row_sum[index] = top_data[index * dim + gt] - bottom_data[index * dim + gt]; } } template <typename Dtype> __global__ void LabelSpecificHardMarginBackward(const int num, const int dim, const Dtype* top_diff, const Dtype* label, const Dtype* bottom_data, Dtype* bottom_diff, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num) { int gt = static_cast<int>(label[index]); bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * positive_weight; } } template <typename Dtype> __global__ void LabelSpecificHardMarginBackwardNegative(const int num, const int dim, const Dtype* top_diff, const Dtype* label, const Dtype* bottom_data, Dtype* bottom_diff, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; int d = index % dim; int gt = static_cast<int>(label[n]); if (d != gt) { bottom_diff[n * dim + d] += top_diff[n * dim + gt] * (1 - positive_weight) / (dim - 1); } } } template <typename Dtype> void LabelSpecificHardMarginLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int num = bottom[0]->num(); int count = bottom[0]->count(); int dim = count / num; caffe_copy(count, 
bottom_data, top_data); if (this->phase_ == TEST) return; caffe_gpu_gemv(CblasNoTrans, num, dim, Dtype(1), bottom_data, sum_multiplier_.gpu_data(), Dtype(0), margins_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, bottom_data, label_data, margins_.mutable_gpu_data(), top_data, positive_weight); CUDA_POST_KERNEL_CHECK; if (top.size() == 2) { top[1]->mutable_cpu_data()[0] = margins_.asum_data() / Dtype(num) / Dtype(M_PI) * Dtype(180.0); } } template <typename Dtype> void LabelSpecificHardMarginLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int num = bottom[0]->num(); int count = bottom[0]->count(); int dim = count / num; if (propagate_down[0]) { caffe_copy(count, top_diff, bottom_diff); if (this->phase_ == TEST) return; // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, top_diff, label_data, bottom_data, bottom_diff, positive_weight); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginBackwardNegative<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, top_diff, label_data, bottom_data, bottom_diff, positive_weight); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificHardMarginLayer); } // namespace caffe
62fc659678d7c0eabbebdb5e27d22f78e88369be.cu
#include <algorithm> #include <vector> #include <cmath> #include "caffe/layers/label_specific_hard_margin.hpp" namespace caffe { template <typename Dtype> __global__ void LabelSpecificHardMarginForward(const int num, const int dim, const Dtype* bottom_data, const Dtype* label, Dtype* row_sum, Dtype* top_data, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num) { int gt = static_cast<int>(label[index]); row_sum[index] = (row_sum[index] - bottom_data[index * dim + gt]) / (dim - 1); top_data[index * dim + gt] = bottom_data[index * dim + gt] * positive_weight + row_sum[index] * (1 - positive_weight); row_sum[index] = top_data[index * dim + gt] - bottom_data[index * dim + gt]; } } template <typename Dtype> __global__ void LabelSpecificHardMarginBackward(const int num, const int dim, const Dtype* top_diff, const Dtype* label, const Dtype* bottom_data, Dtype* bottom_diff, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num) { int gt = static_cast<int>(label[index]); bottom_diff[index * dim + gt] = top_diff[index * dim + gt] * positive_weight; } } template <typename Dtype> __global__ void LabelSpecificHardMarginBackwardNegative(const int num, const int dim, const Dtype* top_diff, const Dtype* label, const Dtype* bottom_data, Dtype* bottom_diff, Dtype positive_weight) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; int d = index % dim; int gt = static_cast<int>(label[n]); if (d != gt) { bottom_diff[n * dim + d] += top_diff[n * dim + gt] * (1 - positive_weight) / (dim - 1); } } } template <typename Dtype> void LabelSpecificHardMarginLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int num = bottom[0]->num(); int count = bottom[0]->count(); int dim = count / num; caffe_copy(count, bottom_data, top_data); if (this->phase_ == TEST) return; caffe_gpu_gemv(CblasNoTrans, num, 
dim, Dtype(1), bottom_data, sum_multiplier_.gpu_data(), Dtype(0), margins_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, bottom_data, label_data, margins_.mutable_gpu_data(), top_data, positive_weight); CUDA_POST_KERNEL_CHECK; if (top.size() == 2) { top[1]->mutable_cpu_data()[0] = margins_.asum_data() / Dtype(num) / Dtype(M_PI) * Dtype(180.0); } } template <typename Dtype> void LabelSpecificHardMarginLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int num = bottom[0]->num(); int count = bottom[0]->count(); int dim = count / num; if (propagate_down[0]) { caffe_copy(count, top_diff, bottom_diff); if (this->phase_ == TEST) return; // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, top_diff, label_data, bottom_data, bottom_diff, positive_weight); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) LabelSpecificHardMarginBackwardNegative<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > ( num, dim, top_diff, label_data, bottom_data, bottom_diff, positive_weight); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificHardMarginLayer); } // namespace caffe
1125dcc9a5f9ade662920f7d410c327716368669.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Simple conversion function template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, int Count> void run_test() { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = Source(i % 4); } source.sync_device(); hipLaunchKernelGGL(( convert<Destination, Source, kN>), dim3(grid), dim3(block) , 0, 0, reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_f16_rn) { int const kN = 1; using Source = float; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32x8_to_f16x8_rn) { int const kN = 8; using 
Source = float; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f16_to_f32_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16x8_to_f32x8_rn) { int const kN = 8; using Source = cutlass::half_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_fe4m3_rn) { int const kN = 1; using Source = float; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe4m3_rn_array) { int const kN = 27; using Source = float; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe5m2_rn) { int const kN = 1; using Source = float; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe5m2_rn_array) { int const kN = 27; using Source = float; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::half_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, 
Source, kN>(); } TEST(NumericConversion, f16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::half_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, fe4m3_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_fe5m2_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_fe4m3_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f32_rn) { int 
const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32x8_to_s8x8_rn) { int const kN = 8; using Source = float; using Destination = int8_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f32_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f32_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } 
TEST(NumericConversion, fe5m2_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
1125dcc9a5f9ade662920f7d410c327716368669.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Simple conversion function template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, int Count> void run_test() { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = Source(i % 4); } source.sync_device(); convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_f16_rn) { int const kN = 1; using Source = float; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32x8_to_f16x8_rn) { int const kN = 8; using Source = float; using Destination = 
cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f16_to_f32_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16x8_to_f32x8_rn) { int const kN = 8; using Source = cutlass::half_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_fe4m3_rn) { int const kN = 1; using Source = float; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe4m3_rn_array) { int const kN = 27; using Source = float; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe5m2_rn) { int const kN = 1; using Source = float; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f32_to_fe5m2_rn_array) { int const kN = 27; using Source = float; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::half_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, f16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, 
f16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::half_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, bf16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, fe4m3_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_fe5m2_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::float_e5m2_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_fe4m3_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::float_e4m3_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f32_rn) { int const kN = 1; using Source = 
cutlass::float_e4m3_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32x8_to_s8x8_rn) { int const kN = 8; using Source = float; using Destination = int8_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f32_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f32_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = float; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_f16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_f16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::half_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe4m3_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_bf16_rn) { int 
const kN = 1; using Source = cutlass::float_e5m2_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } TEST(NumericConversion, fe5m2_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; using Destination = cutlass::bfloat16_t; test::core::kernel::run_test<Destination, Source, kN>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
6bded826e1503f3537864cf20054057f6f9c8b35.hip
// !!! This is a file automatically generated by hipify!!! //====================================== // // GPU //====================================== #include"stdafx.h" #include"UpConvolution_DATA.hpp" #include"UpConvolution_FUNC.hpp" #include"UpConvolution_Base.h" #include"UpConvolution_GPU.cuh" #include"UpConvolution_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** */ UpConvolution_GPU::UpConvolution_GPU(Gravisbell::GUID guid, UpConvolution_LayerData_GPU& i_layerData) : UpConvolution_Base (guid) , layerData (i_layerData) /**< */ , inputBufferCount (0) /**< */ , neuronCount (0) /**< */ , outputBufferCount (0) /**< */ , cudnnHandle (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) , biasTensorDesc (NULL) , filterDesc (NULL) , convDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateTensorDescriptor(&biasTensorDesc); cudnnCreateFilterDescriptor(&filterDesc); cudnnCreateConvolutionDescriptor(&convDesc); } /** */ UpConvolution_GPU::~UpConvolution_GPU() { if(convDesc) cudnnDestroyConvolutionDescriptor(convDesc); if(filterDesc) cudnnDestroyFilterDescriptor(filterDesc); if(biasTensorDesc) cudnnDestroyTensorDescriptor(biasTensorDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // //================================ /** */ U32 UpConvolution_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** . 
@return 0 */ ErrorCode UpConvolution_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // //=========================== /** */ UpConvolution_LayerData_Base& UpConvolution_GPU::GetLayerData() { return this->layerData; } const UpConvolution_LayerData_Base& UpConvolution_GPU::GetLayerData()const { return this->layerData; } //================================ // //================================ /** .() @param batchSize . NN. PreProcessLearnLoop. */ ErrorCode UpConvolution_GPU::PreProcessLearn(unsigned int batchSize) { ErrorCode errorCode = this->PreProcessCalculate(batchSize); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // this->lpDInputBuffer.resize(this->batchSize * this->inputBufferCount); // / this->lpDBias.resize(this->layerData.lpBias_d.size()); this->lppDNeuron.resize(this->layerData.lppNeuron_d.size()); return ErrorCode::ERROR_CODE_NONE; } /** .() @param batchSize . NN. Calculate. */ ErrorCode UpConvolution_GPU::PreProcessCalculate(unsigned int batchSize) { this->batchSize = batchSize; // this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // this->neuronCount = this->layerData.layerStructure.Output_Channel; if(this->neuronCount == 0) return ErrorCode::ERROR_CODE_FRAUD_NEURON_COUNT; // this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; cudnnStatus_t err_cudnn; // S32 dataDim = 1 + 1 + 0; // + + 0 std::vector<S32> dimInput; // std::vector<S32> dimInputStride; // std::vector<S32> dimBias; std::vector<S32> dimBiasStride; std::vector<S32> dimOutput; std::vector<S32> dimOutputStride; S32 filterDim = 0; // + + std::vector<S32> dimFilter; S32 convDim = 0; // std::vector<S32> dimStride; std::vector<S32> dimDilation; std::vector<S32> dimPadding; if(this->layerData.inputDataStruct.z > 1) { dataDim = 1 + 1 + 3; dimInput.resize(dataDim); 
dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.z * this->layerData.layerStructure.UpScale.x; dimInput[3] = this->layerData.inputDataStruct.y * this->layerData.layerStructure.UpScale.y; dimInput[4] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.z; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[1] = dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[2] = dimInput[3] * dimInput[4]; dimInputStride[3] = dimInput[4]; dimInputStride[4] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] = 1; dimBias[3] = 1; dimBias[4] = 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2] * dimBias[3] * dimBias[4]; dimBiasStride[1] = dimBias[2] * dimBias[3] * dimBias[4]; dimBiasStride[2] = dimBias[3] * dimBias[4]; dimBiasStride[3] = dimBias[4]; dimBiasStride[4] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 3; // + + 3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.z; dimFilter[3] = this->layerData.layerStructure.FilterSize.y; dimFilter[4] = this->layerData.layerStructure.FilterSize.x; convDim = 3; // 3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.z; dimPadding[1] = this->layerData.layerStructure.Padding.y; dimPadding[2] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimDilation[2] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.z; dimStride[1] = this->layerData.layerStructure.Stride.y; dimStride[2] = this->layerData.layerStructure.Stride.x; } else if(this->layerData.inputDataStruct.y > 1) { dataDim = 1 + 1 + 2; 
dimInput.resize(dataDim); dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.y * this->layerData.layerStructure.UpScale.y; dimInput[3] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3]; dimInputStride[1] = dimInput[2] * dimInput[3]; dimInputStride[2] = dimInput[3]; dimInputStride[3] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] = 1; dimBias[3] = 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2] * dimBias[3]; dimBiasStride[1] = dimBias[2] * dimBias[3]; dimBiasStride[2] = dimBias[3]; dimBiasStride[3] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 2; // + + 3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.y; dimFilter[3] = this->layerData.layerStructure.FilterSize.x; convDim = 2; // 3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.y; dimPadding[1] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.y; dimStride[1] = this->layerData.layerStructure.Stride.x; } else if(this->layerData.inputDataStruct.x > 1) { dataDim = 1 + 1 + 1; dimInput.resize(dataDim); dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2]; dimInputStride[1] = dimInput[2]; dimInputStride[2] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] 
= 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2]; dimBiasStride[1] = dimBias[2]; dimBiasStride[2] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 1; // + + 3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.x; convDim = 1; // 3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.x; } else { return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; } // CUDNN err_cudnn = cudnnSetTensorNdDescriptor( this->inputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimInput[0], &dimInputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // err_cudnn = cudnnSetFilterNdDescriptor( this->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filterDim, &dimFilter[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // err_cudnn = cudnnSetConvolutionNdDescriptor( this->convDesc, convDim, &dimPadding[0], &dimStride[0], &dimDilation[0], CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // err_cudnn = cudnnGetConvolutionNdForwardOutputDim( this->convDesc, this->inputTensorDesc, this->filterDesc, dataDim, &dimOutput[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // CUDNNGravisbell Gravisbell::Vector3D<S32> outputVector; S32 outputBatchSize = dimOutput[0]; S32 outputCh = dimOutput[1]; if(dataDim == 5) { outputVector.z = dimOutput[2]; outputVector.y = dimOutput[3]; outputVector.x = dimOutput[4]; } else if(dataDim == 4) { outputVector.z = 1; outputVector.y = dimOutput[2]; outputVector.x = dimOutput[3]; } else if(dataDim == 3) { outputVector.z = 1; outputVector.y = 1; outputVector.x = dimOutput[2]; } if(outputBatchSize != this->batchSize) return 
ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputCh != this->GetOutputDataStruct().ch) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.z != this->GetOutputDataStruct().z) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.y != this->GetOutputDataStruct().y) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.x != this->GetOutputDataStruct().x) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // CUDNN dimOutputStride.resize(dataDim); for(S32 i=0; i<dataDim; i++) { dimOutputStride[i] = 1; for(S32 j=i+1; j<dataDim; j++) dimOutputStride[i] *= dimOutput[j]; } err_cudnn = cudnnSetTensorNdDescriptor( this->outputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimOutput[0], &dimOutputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // () err_cudnn = cudnnGetConvolutionForwardAlgorithm( this->cudnnHandle, this->inputTensorDesc, this->filterDesc, this->convDesc, this->outputTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // 0, // &this->useForwardAlgorithm ); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // () size_t workSpaceSizeByte_forward; err_cudnn = cudnnGetConvolutionForwardWorkspaceSize( this->cudnnHandle, this->inputTensorDesc, this->filterDesc, this->convDesc, this->outputTensorDesc, this->useForwardAlgorithm, &workSpaceSizeByte_forward); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) err_cudnn = cudnnGetConvolutionBackwardDataAlgorithm( this->cudnnHandle, this->filterDesc, this->outputTensorDesc, this->convDesc, this->inputTensorDesc, cudnnConvolutionBwdDataPreference_t::CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, // 0, // &this->useBackwardDataAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) size_t workSpaceSizeByte_backwardData; err_cudnn = cudnnGetConvolutionBackwardDataWorkspaceSize( this->cudnnHandle, this->filterDesc, this->outputTensorDesc, this->convDesc, this->inputTensorDesc, this->useBackwardDataAlgorithm, 
&workSpaceSizeByte_backwardData); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) err_cudnn = cudnnGetConvolutionBackwardFilterAlgorithm( this->cudnnHandle, this->inputTensorDesc, this->outputTensorDesc, this->convDesc, this->filterDesc, cudnnConvolutionBwdFilterPreference_t::CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, // 0, // &this->useBackwardFilterAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // (-) size_t workSpaceSizeByte_backwardFilter; err_cudnn = cudnnGetConvolutionBackwardFilterWorkspaceSize( this->cudnnHandle, this->inputTensorDesc, this->outputTensorDesc, this->convDesc, this->filterDesc, this->useBackwardFilterAlgorithm, &workSpaceSizeByte_backwardFilter); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // this->workSpace.resize(max(workSpaceSizeByte_forward, max(workSpaceSizeByte_backwardData, workSpaceSizeByte_backwardFilter))); // this->lpOutputBuffer.resize(this->batchSize * this->outputBufferCount); // err_cudnn = cudnnSetTensorNdDescriptor( this->biasTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimBias[0], &dimBiasStride[0]); return ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode UpConvolution_GPU::PreProcessLearnLoop(const SettingData::Standard::IData& data) { if(this->pLearnData != NULL) delete this->pLearnData; this->pLearnData = data.Clone(); // { auto pItem = dynamic_cast<const Gravisbell::SettingData::Standard::IItem_Float*>(data.GetItemByID(L"LearnCoeff")); if(pItem) this->learnData.LearnCoeff = pItem->GetValue(); else this->learnData.LearnCoeff = 1.0f; } return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** . Calculate. */ ErrorCode UpConvolution_GPU::PreProcessCalculateLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** . @param lpInputBuffer . 
GetInputBufferCount @return 0 */ ErrorCode UpConvolution_GPU::Calculate(CONST_BATCH_BUFFER_POINTER i_lpInputBuffer) { cudnnStatus_t err_cudnn; // this->m_lppInputBuffer_d = i_lpInputBuffer; // { } // { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionForward( this->cudnnHandle, &alpha, this->inputTensorDesc, i_lpInputBuffer, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0]), this->convDesc, this->useForwardAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->outputTensorDesc, thrust::raw_pointer_cast(&this->lpOutputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // { F32 alpha = 1.0f; F32 beta = 1.0f; err_cudnn = cudnnAddTensor( this->cudnnHandle, &alpha, this->biasTensorDesc, thrust::raw_pointer_cast(&this->layerData.lpBias_d[0]), &beta, this->outputTensorDesc, thrust::raw_pointer_cast(&this->lpOutputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } return ErrorCode::ERROR_CODE_NONE; } /** . GetOutputBufferCount. @return */ CONST_BATCH_BUFFER_POINTER UpConvolution_GPU::GetOutputBuffer()const { return thrust::raw_pointer_cast(&this->lpOutputBuffer[0]); } /** . @param o_lpOutputBuffer . [GetBatchSize()][GetOutputBufferCount()] @return 0 */ ErrorCode UpConvolution_GPU::GetOutputBuffer(BATCH_BUFFER_POINTER o_lpOutputBuffer)const { if(o_lpOutputBuffer == NULL) return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE; const U32 batchSize = this->GetBatchSize(); const U32 outputBufferCount = this->GetOutputBufferCount(); hipMemcpy(o_lpOutputBuffer, this->GetOutputBuffer(), sizeof(F32)*outputBufferCount*batchSize, hipMemcpyDeviceToHost); return ErrorCode::ERROR_CODE_NONE; } //================================ // //================================ /** . Calculate. @param i_lppDOutputBuffer =. [GetBatchSize()][GetOutputBufferCount()]. 
*/ ErrorCode UpConvolution_GPU::Training(CONST_BATCH_BUFFER_POINTER i_lpDOutputBufferPrev) { cudnnStatus_t err_cudnn; // this->m_lppDOutputBuffer_d = i_lpDOutputBufferPrev; // { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionBackwardData( this->cudnnHandle, &alpha, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0]), this->outputTensorDesc, this->m_lppDOutputBuffer_d, this->convDesc, this->useBackwardDataAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->inputTensorDesc, thrust::raw_pointer_cast(&this->lpDInputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // { F32 alpha = this->learnData.LearnCoeff; F32 beta = 1.0f; err_cudnn = cudnnConvolutionBackwardFilter( this->cudnnHandle, &alpha, this->inputTensorDesc, this->m_lppInputBuffer_d, this->outputTensorDesc, this->m_lppDOutputBuffer_d, this->convDesc, this->useBackwardFilterAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // { F32 alpha = this->learnData.LearnCoeff; F32 beta = 1.0f; err_cudnn = cudnnConvolutionBackwardBias( this->cudnnHandle, &alpha, this->outputTensorDesc, this->m_lppDOutputBuffer_d, &beta, this->biasTensorDesc, thrust::raw_pointer_cast(&this->layerData.lpBias_d[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } return ErrorCode::ERROR_CODE_NONE; } /** . [GetBatchSize()][GetInputBufferCount()] @return */ CONST_BATCH_BUFFER_POINTER UpConvolution_GPU::GetDInputBuffer()const { return thrust::raw_pointer_cast(&this->lpDInputBuffer[0]); } /** . 
@param lpDInputBuffer .[GetBatchSize()][GetInputBufferCount()] */ ErrorCode UpConvolution_GPU::GetDInputBuffer(BATCH_BUFFER_POINTER o_lpDInputBuffer)const { if(o_lpDInputBuffer == NULL) return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE; const U32 batchSize = this->GetBatchSize(); const U32 inputBufferCount = this->GetInputBufferCount(); hipMemcpy(o_lpDInputBuffer, this->GetDInputBuffer(), sizeof(F32)*inputBufferCount*this->batchSize, hipMemcpyDeviceToHost); return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork;
6bded826e1503f3537864cf20054057f6f9c8b35.cu
//====================================== // 畳み込みニューラルネットワークの結合レイヤー // GPU処理用 //====================================== #include"stdafx.h" #include"UpConvolution_DATA.hpp" #include"UpConvolution_FUNC.hpp" #include"UpConvolution_Base.h" #include"UpConvolution_GPU.cuh" #include"UpConvolution_LayerData_GPU.cuh" using namespace Gravisbell; using namespace Gravisbell::Layer::NeuralNetwork; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { /** コンストラクタ */ UpConvolution_GPU::UpConvolution_GPU(Gravisbell::GUID guid, UpConvolution_LayerData_GPU& i_layerData) : UpConvolution_Base (guid) , layerData (i_layerData) /**< レイヤーデータ */ , inputBufferCount (0) /**< 入力バッファ数 */ , neuronCount (0) /**< ニューロン数 */ , outputBufferCount (0) /**< 出力バッファ数 */ , cudnnHandle (NULL) , inputTensorDesc (NULL) , outputTensorDesc (NULL) , biasTensorDesc (NULL) , filterDesc (NULL) , convDesc (NULL) { cudnnCreate(&cudnnHandle); cudnnCreateTensorDescriptor(&inputTensorDesc); cudnnCreateTensorDescriptor(&outputTensorDesc); cudnnCreateTensorDescriptor(&biasTensorDesc); cudnnCreateFilterDescriptor(&filterDesc); cudnnCreateConvolutionDescriptor(&convDesc); } /** デストラクタ */ UpConvolution_GPU::~UpConvolution_GPU() { if(convDesc) cudnnDestroyConvolutionDescriptor(convDesc); if(filterDesc) cudnnDestroyFilterDescriptor(filterDesc); if(biasTensorDesc) cudnnDestroyTensorDescriptor(biasTensorDesc); if(outputTensorDesc) cudnnDestroyTensorDescriptor(outputTensorDesc); if(inputTensorDesc) cudnnDestroyTensorDescriptor(inputTensorDesc); if(cudnnHandle) cudnnDestroy(cudnnHandle); } //================================ // 基本処理 //================================ /** レイヤー種別の取得 */ U32 UpConvolution_GPU::GetLayerKind()const { return Layer::ELayerKind::LAYER_KIND_GPU | GetLayerKindBase(); } /** 初期化. 
各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode UpConvolution_GPU::Initialize(void) { return this->layerData.Initialize(); } //=========================== // レイヤーデータ関連 //=========================== /** レイヤーデータを取得する */ UpConvolution_LayerData_Base& UpConvolution_GPU::GetLayerData() { return this->layerData; } const UpConvolution_LayerData_Base& UpConvolution_GPU::GetLayerData()const { return this->layerData; } //================================ // 演算処理 //================================ /** 演算前処理を実行する.(学習用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はPreProcessLearnLoop以降の処理は実行不可. */ ErrorCode UpConvolution_GPU::PreProcessLearn(unsigned int batchSize) { ErrorCode errorCode = this->PreProcessCalculate(batchSize); if(errorCode != ErrorCode::ERROR_CODE_NONE) return errorCode; // 入力差分バッファを作成 this->lpDInputBuffer.resize(this->batchSize * this->inputBufferCount); // ニューロン/バイアスの誤差を一時保存するバッファを作成 this->lpDBias.resize(this->layerData.lpBias_d.size()); this->lppDNeuron.resize(this->layerData.lppNeuron_d.size()); return ErrorCode::ERROR_CODE_NONE; } /** 演算前処理を実行する.(演算用) @param batchSize 同時に演算を行うバッチのサイズ. NN作成後、演算処理を実行する前に一度だけ必ず実行すること。データごとに実行する必要はない. 失敗した場合はCalculate以降の処理は実行不可. 
*/ ErrorCode UpConvolution_GPU::PreProcessCalculate(unsigned int batchSize) { this->batchSize = batchSize; // 入力バッファ数を確認 this->inputBufferCount = this->GetInputBufferCount(); if(this->inputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_INPUT_COUNT; // ニューロン数を確認 this->neuronCount = this->layerData.layerStructure.Output_Channel; if(this->neuronCount == 0) return ErrorCode::ERROR_CODE_FRAUD_NEURON_COUNT; // 出力バッファ数を確認 this->outputBufferCount = this->GetOutputBufferCount(); if(this->outputBufferCount == 0) return ErrorCode::ERROR_CODE_FRAUD_OUTPUT_COUNT; cudnnStatus_t err_cudnn; // 次元数を調べる S32 dataDim = 1 + 1 + 0; // バッチ + チャンネル + 次元0 std::vector<S32> dimInput; // 入力データ構造 std::vector<S32> dimInputStride; // 入力データの各次元ごとのデータ数 std::vector<S32> dimBias; std::vector<S32> dimBiasStride; std::vector<S32> dimOutput; std::vector<S32> dimOutputStride; S32 filterDim = 0; // フィルタ次元数 入力チャンネル + 出力チャンネル + 次元 std::vector<S32> dimFilter; S32 convDim = 0; // 畳み込み次元数 次元 std::vector<S32> dimStride; std::vector<S32> dimDilation; std::vector<S32> dimPadding; if(this->layerData.inputDataStruct.z > 1) { dataDim = 1 + 1 + 3; dimInput.resize(dataDim); dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.z * this->layerData.layerStructure.UpScale.x; dimInput[3] = this->layerData.inputDataStruct.y * this->layerData.layerStructure.UpScale.y; dimInput[4] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.z; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[1] = dimInput[2] * dimInput[3] * dimInput[4]; dimInputStride[2] = dimInput[3] * dimInput[4]; dimInputStride[3] = dimInput[4]; dimInputStride[4] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] = 1; dimBias[3] = 1; dimBias[4] = 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2] * 
dimBias[3] * dimBias[4]; dimBiasStride[1] = dimBias[2] * dimBias[3] * dimBias[4]; dimBiasStride[2] = dimBias[3] * dimBias[4]; dimBiasStride[3] = dimBias[4]; dimBiasStride[4] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 3; // 入力チャンネル + 出力チャンネル + 次元3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.z; dimFilter[3] = this->layerData.layerStructure.FilterSize.y; dimFilter[4] = this->layerData.layerStructure.FilterSize.x; convDim = 3; // 次元3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.z; dimPadding[1] = this->layerData.layerStructure.Padding.y; dimPadding[2] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimDilation[2] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.z; dimStride[1] = this->layerData.layerStructure.Stride.y; dimStride[2] = this->layerData.layerStructure.Stride.x; } else if(this->layerData.inputDataStruct.y > 1) { dataDim = 1 + 1 + 2; dimInput.resize(dataDim); dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.y * this->layerData.layerStructure.UpScale.y; dimInput[3] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2] * dimInput[3]; dimInputStride[1] = dimInput[2] * dimInput[3]; dimInputStride[2] = dimInput[3]; dimInputStride[3] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] = 1; dimBias[3] = 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2] * dimBias[3]; dimBiasStride[1] = dimBias[2] * dimBias[3]; dimBiasStride[2] = dimBias[3]; dimBiasStride[3] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 
2; // 入力チャンネル + 出力チャンネル + 次元3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.y; dimFilter[3] = this->layerData.layerStructure.FilterSize.x; convDim = 2; // 次元3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.y; dimPadding[1] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimDilation[1] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.y; dimStride[1] = this->layerData.layerStructure.Stride.x; } else if(this->layerData.inputDataStruct.x > 1) { dataDim = 1 + 1 + 1; dimInput.resize(dataDim); dimInput[0] = this->batchSize; dimInput[1] = this->layerData.inputDataStruct.ch; dimInput[2] = this->layerData.inputDataStruct.x * this->layerData.layerStructure.UpScale.x; dimInputStride.resize(dataDim); dimInputStride[0] = dimInput[1] * dimInput[2]; dimInputStride[1] = dimInput[2]; dimInputStride[2] = 1; dimBias.resize(dataDim); dimBias[0] = 1; dimBias[1] = this->layerData.GetOutputDataStruct().ch; dimBias[2] = 1; dimBiasStride.resize(dataDim); dimBiasStride[0] = dimBias[1] * dimBias[2]; dimBiasStride[1] = dimBias[2]; dimBiasStride[2] = 1; dimOutput.resize(dataDim); filterDim = 1 + 1 + 1; // 入力チャンネル + 出力チャンネル + 次元3 dimFilter.resize(filterDim); dimFilter[0] = this->layerData.GetOutputDataStruct().ch; dimFilter[1] = this->layerData.inputDataStruct.ch; dimFilter[2] = this->layerData.layerStructure.FilterSize.x; convDim = 1; // 次元3 dimPadding.resize(convDim); dimPadding[0] = this->layerData.layerStructure.Padding.x; dimDilation.resize(convDim); dimDilation[0] = 1; dimStride.resize(convDim); dimStride[0] = this->layerData.layerStructure.Stride.x; } else { return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; } // CUDNNの入力データ構造を設定 err_cudnn = cudnnSetTensorNdDescriptor( this->inputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimInput[0], 
&dimInputStride[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // フィルタサイズを設定 err_cudnn = cudnnSetFilterNdDescriptor( this->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, filterDim, &dimFilter[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 畳み込み処理設定 err_cudnn = cudnnSetConvolutionNdDescriptor( this->convDesc, convDim, &dimPadding[0], &dimStride[0], &dimDilation[0], CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 出力データ構造を取得 err_cudnn = cudnnGetConvolutionNdForwardOutputDim( this->convDesc, this->inputTensorDesc, this->filterDesc, dataDim, &dimOutput[0]); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // CUDNNの出力データ構造とGravisbellの出力データ構造が一致することを確認 Gravisbell::Vector3D<S32> outputVector; S32 outputBatchSize = dimOutput[0]; S32 outputCh = dimOutput[1]; if(dataDim == 5) { outputVector.z = dimOutput[2]; outputVector.y = dimOutput[3]; outputVector.x = dimOutput[4]; } else if(dataDim == 4) { outputVector.z = 1; outputVector.y = dimOutput[2]; outputVector.x = dimOutput[3]; } else if(dataDim == 3) { outputVector.z = 1; outputVector.y = 1; outputVector.x = dimOutput[2]; } if(outputBatchSize != this->batchSize) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputCh != this->GetOutputDataStruct().ch) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.z != this->GetOutputDataStruct().z) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.y != this->GetOutputDataStruct().y) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; if(outputVector.x != this->GetOutputDataStruct().x) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // CUDNNの出力データ構造を設定 dimOutputStride.resize(dataDim); for(S32 i=0; i<dataDim; i++) { dimOutputStride[i] = 1; for(S32 j=i+1; j<dataDim; j++) dimOutputStride[i] *= dimOutput[j]; } err_cudnn = cudnnSetTensorNdDescriptor( this->outputTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimOutput[0], &dimOutputStride[0]); 
if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_ALLOCATION_MEMORY; // 最速のアルゴリズムを検索する(前方伝播) err_cudnn = cudnnGetConvolutionForwardAlgorithm( this->cudnnHandle, this->inputTensorDesc, this->filterDesc, this->convDesc, this->outputTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // メモリの使用量無制限で最速のアルゴリズムを調べる 0, // 使用可能なメモリの上限 &this->useForwardAlgorithm ); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 必要なメモリ量を調べる(前方伝播) size_t workSpaceSizeByte_forward; err_cudnn = cudnnGetConvolutionForwardWorkspaceSize( this->cudnnHandle, this->inputTensorDesc, this->filterDesc, this->convDesc, this->outputTensorDesc, this->useForwardAlgorithm, &workSpaceSizeByte_forward); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 最速のアルゴリズムを検索する(後方伝播-データ) err_cudnn = cudnnGetConvolutionBackwardDataAlgorithm( this->cudnnHandle, this->filterDesc, this->outputTensorDesc, this->convDesc, this->inputTensorDesc, cudnnConvolutionBwdDataPreference_t::CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, // メモリの使用量無制限で最速のアルゴリズムを調べる 0, // 使用可能なメモリの上限 &this->useBackwardDataAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 必要なメモリ量を調べる(後方伝播-データ) size_t workSpaceSizeByte_backwardData; err_cudnn = cudnnGetConvolutionBackwardDataWorkspaceSize( this->cudnnHandle, this->filterDesc, this->outputTensorDesc, this->convDesc, this->inputTensorDesc, this->useBackwardDataAlgorithm, &workSpaceSizeByte_backwardData); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 最速のアルゴリズムを検索する(後方伝播-データ) err_cudnn = cudnnGetConvolutionBackwardFilterAlgorithm( this->cudnnHandle, this->inputTensorDesc, this->outputTensorDesc, this->convDesc, this->filterDesc, cudnnConvolutionBwdFilterPreference_t::CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, // メモリの使用量無制限で最速のアルゴリズムを調べる 0, // 使用可能なメモリの上限 &this->useBackwardFilterAlgorithm); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 必要なメモリ量を調べる(後方伝播-データ) size_t workSpaceSizeByte_backwardFilter; 
err_cudnn = cudnnGetConvolutionBackwardFilterWorkspaceSize( this->cudnnHandle, this->inputTensorDesc, this->outputTensorDesc, this->convDesc, this->filterDesc, this->useBackwardFilterAlgorithm, &workSpaceSizeByte_backwardFilter); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_INITIALIZE; // 処理用バッファの確保 this->workSpace.resize(max(workSpaceSizeByte_forward, max(workSpaceSizeByte_backwardData, workSpaceSizeByte_backwardFilter))); // 出力バッファを作成 this->lpOutputBuffer.resize(this->batchSize * this->outputBufferCount); // バイアスのデータ構造を設定 err_cudnn = cudnnSetTensorNdDescriptor( this->biasTensorDesc, CUDNN_DATA_FLOAT, dataDim, &dimBias[0], &dimBiasStride[0]); return ErrorCode::ERROR_CODE_NONE; } /** 学習ループの初期化処理.データセットの学習開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode UpConvolution_GPU::PreProcessLearnLoop(const SettingData::Standard::IData& data) { if(this->pLearnData != NULL) delete this->pLearnData; this->pLearnData = data.Clone(); // 学習係数 { auto pItem = dynamic_cast<const Gravisbell::SettingData::Standard::IItem_Float*>(data.GetItemByID(L"LearnCoeff")); if(pItem) this->learnData.LearnCoeff = pItem->GetValue(); else this->learnData.LearnCoeff = 1.0f; } return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** 演算ループの初期化処理.データセットの演算開始前に実行する 失敗した場合はCalculate以降の処理は実行不可. */ ErrorCode UpConvolution_GPU::PreProcessCalculateLoop() { return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** 演算処理を実行する. @param lpInputBuffer 入力データバッファ. 
GetInputBufferCountで取得した値の要素数が必要 @return 成功した場合0が返る */ ErrorCode UpConvolution_GPU::Calculate(CONST_BATCH_BUFFER_POINTER i_lpInputBuffer) { cudnnStatus_t err_cudnn; // 入力バッファを保存 this->m_lppInputBuffer_d = i_lpInputBuffer; // 入力信号を拡張 { } // 畳み込み処理 { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionForward( this->cudnnHandle, &alpha, this->inputTensorDesc, i_lpInputBuffer, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0]), this->convDesc, this->useForwardAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->outputTensorDesc, thrust::raw_pointer_cast(&this->lpOutputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // バイアスを追加 { F32 alpha = 1.0f; F32 beta = 1.0f; err_cudnn = cudnnAddTensor( this->cudnnHandle, &alpha, this->biasTensorDesc, thrust::raw_pointer_cast(&this->layerData.lpBias_d[0]), &beta, this->outputTensorDesc, thrust::raw_pointer_cast(&this->lpOutputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } return ErrorCode::ERROR_CODE_NONE; } /** 出力データバッファを取得する. 配列の要素数はGetOutputBufferCountの戻り値. @return 出力データ配列の先頭ポインタ */ CONST_BATCH_BUFFER_POINTER UpConvolution_GPU::GetOutputBuffer()const { return thrust::raw_pointer_cast(&this->lpOutputBuffer[0]); } /** 出力データバッファを取得する. @param o_lpOutputBuffer 出力データ格納先配列. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要 @return 成功した場合0 */ ErrorCode UpConvolution_GPU::GetOutputBuffer(BATCH_BUFFER_POINTER o_lpOutputBuffer)const { if(o_lpOutputBuffer == NULL) return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE; const U32 batchSize = this->GetBatchSize(); const U32 outputBufferCount = this->GetOutputBufferCount(); cudaMemcpy(o_lpOutputBuffer, this->GetOutputBuffer(), sizeof(F32)*outputBufferCount*batchSize, cudaMemcpyDeviceToHost); return ErrorCode::ERROR_CODE_NONE; } //================================ // 学習処理 //================================ /** 学習処理を実行する. 入力信号、出力信号は直前のCalculateの値を参照する. 
@param i_lppDOutputBuffer 出力誤差差分=次レイヤーの入力誤差差分. [GetBatchSize()の戻り値][GetOutputBufferCount()の戻り値]の要素数が必要. 直前の計算結果を使用する */ ErrorCode UpConvolution_GPU::Training(CONST_BATCH_BUFFER_POINTER i_lpDOutputBufferPrev) { cudnnStatus_t err_cudnn; // 出力誤差バッファのアドレスを格納 this->m_lppDOutputBuffer_d = i_lpDOutputBufferPrev; // 入力誤差を計算 { F32 alpha = 1.0f; F32 beta = 0.0f; err_cudnn = cudnnConvolutionBackwardData( this->cudnnHandle, &alpha, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0]), this->outputTensorDesc, this->m_lppDOutputBuffer_d, this->convDesc, this->useBackwardDataAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->inputTensorDesc, thrust::raw_pointer_cast(&this->lpDInputBuffer[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // フィルター変化量を計算 { F32 alpha = this->learnData.LearnCoeff; F32 beta = 1.0f; err_cudnn = cudnnConvolutionBackwardFilter( this->cudnnHandle, &alpha, this->inputTensorDesc, this->m_lppInputBuffer_d, this->outputTensorDesc, this->m_lppDOutputBuffer_d, this->convDesc, this->useBackwardFilterAlgorithm, thrust::raw_pointer_cast(&this->workSpace[0]), this->workSpace.size(), &beta, this->filterDesc, thrust::raw_pointer_cast(&this->layerData.lppNeuron_d[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } // バイアス変化量を計算 { F32 alpha = this->learnData.LearnCoeff; F32 beta = 1.0f; err_cudnn = cudnnConvolutionBackwardBias( this->cudnnHandle, &alpha, this->outputTensorDesc, this->m_lppDOutputBuffer_d, &beta, this->biasTensorDesc, thrust::raw_pointer_cast(&this->layerData.lpBias_d[0])); if(err_cudnn != 0) return ErrorCode::ERROR_CODE_CUDA_CALCULATE; } return ErrorCode::ERROR_CODE_NONE; } /** 学習差分を取得する. 配列の要素数は[GetBatchSize()の戻り値][GetInputBufferCount()の戻り値] @return 誤差差分配列の先頭ポインタ */ CONST_BATCH_BUFFER_POINTER UpConvolution_GPU::GetDInputBuffer()const { return thrust::raw_pointer_cast(&this->lpDInputBuffer[0]); } /** 学習差分を取得する. 
@param lpDInputBuffer 学習差分を格納する配列.[GetBatchSize()の戻り値][GetInputBufferCount()の戻り値]の配列が必要 */ ErrorCode UpConvolution_GPU::GetDInputBuffer(BATCH_BUFFER_POINTER o_lpDInputBuffer)const { if(o_lpDInputBuffer == NULL) return ErrorCode::ERROR_CODE_COMMON_NULL_REFERENCE; const U32 batchSize = this->GetBatchSize(); const U32 inputBufferCount = this->GetInputBufferCount(); cudaMemcpy(o_lpDInputBuffer, this->GetDInputBuffer(), sizeof(F32)*inputBufferCount*this->batchSize, cudaMemcpyDeviceToHost); return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork;
5a47aab8a4892804fe52378d4a08e76d9734e493.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_save_kernel [6][1]; static int dims_save_kernel_h [6][1] = {0}; //user function __device__ void save_kernel_gpu(ACC<double> &rho_old, ACC<double> &rhou_old, ACC<double> &rhoE_old, const ACC<double> &rho_new, const ACC<double> &rhou_new, const ACC<double> &rhoE_new) { rho_old(0)=rho_new(0); rhou_old(0)=rhou_new(0); rhoE_old(0)=rhoE_new(0); } __global__ void ops_save_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, int size0 ){ int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1; arg1 += idx_x * 1*1; arg2 += idx_x * 1*1; arg3 += idx_x * 1*1; arg4 += idx_x * 1*1; arg5 += idx_x * 1*1; if (idx_x < size0) { ACC<double> argp0(arg0); ACC<double> argp1(arg1); ACC<double> argp2(arg2); const ACC<double> argp3(arg3); const ACC<double> argp4(arg4); const ACC<double> argp5(arg5); save_kernel_gpu(argp0, argp1, argp2, argp3, argp4, argp5); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_save_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { #else void ops_par_loop_save_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; #endif //Timing double t1,t2,c1,c2; ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,6,range,1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1,"save_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1,&t1); } //compute locally allocated 
range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[1]; #endif #ifdef OPS_MPI if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<1; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != dims_save_kernel_h[0][0] || xdim1 != dims_save_kernel_h[1][0] || xdim2 != dims_save_kernel_h[2][0] || xdim3 != dims_save_kernel_h[3][0] || xdim4 != dims_save_kernel_h[4][0] || xdim5 != dims_save_kernel_h[5][0]) { dims_save_kernel_h[0][0] = xdim0; dims_save_kernel_h[1][0] = xdim1; dims_save_kernel_h[2][0] = xdim2; dims_save_kernel_h[3][0] = xdim3; dims_save_kernel_h[4][0] = xdim4; dims_save_kernel_h[5][0] = xdim5; cutilSafeCall(hipMemcpyToSymbol( dims_save_kernel, dims_save_kernel_h, sizeof(dims_save_kernel))); } int x_size = MAX(0,end[0]-start[0]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1); dim3 tblock(OPS_block_size_x,1,1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? 
args[5].dat->type_size : args[5].dat->elem_size); char *p_a[6]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args,6,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[1].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0) hipLaunchKernelGGL(( ops_save_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],x_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[1].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[1].mpi_time += t2-t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += 
ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_save_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for ( int i=0; i<2; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 6; desc->args = (ops_arg*)malloc(6*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->function = ops_par_loop_save_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1,"save_kernel"); } ops_enqueue_kernel(desc); } #endif
5a47aab8a4892804fe52378d4a08e76d9734e493.cu
// // auto-generated by ops.py // __constant__ int dims_save_kernel [6][1]; static int dims_save_kernel_h [6][1] = {0}; //user function __device__ void save_kernel_gpu(ACC<double> &rho_old, ACC<double> &rhou_old, ACC<double> &rhoE_old, const ACC<double> &rho_new, const ACC<double> &rhou_new, const ACC<double> &rhoE_new) { rho_old(0)=rho_new(0); rhou_old(0)=rhou_new(0); rhoE_old(0)=rhoE_new(0); } __global__ void ops_save_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, int size0 ){ int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1; arg1 += idx_x * 1*1; arg2 += idx_x * 1*1; arg3 += idx_x * 1*1; arg4 += idx_x * 1*1; arg5 += idx_x * 1*1; if (idx_x < size0) { ACC<double> argp0(arg0); ACC<double> argp1(arg1); ACC<double> argp2(arg2); const ACC<double> argp3(arg3); const ACC<double> argp4(arg4); const ACC<double> argp5(arg5); save_kernel_gpu(argp0, argp1, argp2, argp3, argp4, argp5); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_save_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { #else void ops_par_loop_save_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; #endif //Timing double t1,t2,c1,c2; ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,6,range,1)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(1,"save_kernel"); OPS_kernels[1].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY 
sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[1]; #endif #ifdef OPS_MPI if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<1; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != dims_save_kernel_h[0][0] || xdim1 != dims_save_kernel_h[1][0] || xdim2 != dims_save_kernel_h[2][0] || xdim3 != dims_save_kernel_h[3][0] || xdim4 != dims_save_kernel_h[4][0] || xdim5 != dims_save_kernel_h[5][0]) { dims_save_kernel_h[0][0] = xdim0; dims_save_kernel_h[1][0] = xdim1; dims_save_kernel_h[2][0] = xdim2; dims_save_kernel_h[3][0] = xdim3; dims_save_kernel_h[4][0] = xdim4; dims_save_kernel_h[5][0] = xdim5; cutilSafeCall(cudaMemcpyToSymbol( dims_save_kernel, dims_save_kernel_h, sizeof(dims_save_kernel))); } int x_size = MAX(0,end[0]-start[0]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1); dim3 tblock(OPS_block_size_x,1,1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? 
args[5].dat->type_size : args[5].dat->elem_size); char *p_a[6]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args,6,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[1].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0) ops_save_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],x_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[1].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[1].mpi_time += t2-t1; OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, 
end, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_save_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 1; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 1; for ( int i=0; i<2; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 6; desc->args = (ops_arg*)malloc(6*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->function = ops_par_loop_save_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(1,"save_kernel"); } ops_enqueue_kernel(desc); } #endif
f35e6e15dfa0cdceba466b183a1e6172c3559381.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" #include <stdio.h> #ifndef FOOTPRINT_SIZE #define FOOTPRINT_SIZE BLOCK_SIZE #endif // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // grid Dimensions int gridx = gridDim.x; int gridy = gridDim.y; // Iterate up to max times int max = (A.width / BLOCK_SIZE); // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub float Cvalue1, Cvalue2, Cvalue3, Cvalue4; Cvalue1 = Cvalue2 = Cvalue3 = Cvalue4 = 0.0f; // Each thread computes one element of Csub in its copy of CValue for (int m = 0; m < max; ++m){ // 4 shared matrices that will be used to compute 4 values ( A * B ) ( A * B2 ) ( A2 * B ) (A2 * B2) __shared__ float shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_A2[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B2[BLOCK_SIZE][BLOCK_SIZE]; // Same as MatMultKernel00.cu except I removed Asub and Bsub. Looks more complex but it is the same thing. 
shared_A[thread_row][thread_col] = A.elements[(A.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * m)+(thread_row * A.stride + thread_col)]; shared_B[thread_row][thread_col] = B.elements[(B.stride * BLOCK_SIZE * m + BLOCK_SIZE * block_col)+(thread_row * B.stride + thread_col)]; // differs from shared_A, block_row is incremented by gridDim.y shared_A2[thread_row][thread_col] = A.elements[(A.stride * BLOCK_SIZE * (block_row +gridy)+ BLOCK_SIZE * m)+(thread_row * A.stride + thread_col)]; // differs from shared_B, block_col is incremented by gridDim.x shared_B2[thread_row][thread_col] = B.elements[(B.stride * BLOCK_SIZE * m + BLOCK_SIZE * (block_col + gridx))+(thread_row * B.stride + thread_col)]; // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue1 += shared_A[thread_row][e] * shared_B[e][thread_col]; } // Do an inproduct of one row of shared_A and one col of shared_B2 #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue2 += shared_A[thread_row][e] * shared_B2[e][thread_col]; } // Do an inproduct of one row of shared_A2 and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue3 += shared_A2[thread_row][e] * shared_B[e][thread_col]; } // Do an inproduct of one row of shared_A2 and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue4 += shared_A2[thread_row][e] * shared_B2[e][thread_col]; } // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A, shared_B, shared_A2, and shared_B2 BLOCKS __syncthreads(); } // Write to GLOBAL memory. // Each thread writes its own cell value. 
// Write result of (A * B) C.elements[(C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * block_col) + (thread_row * C.stride + thread_col)] = Cvalue1; // Write result of (A * B2) C.elements[(C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * (block_col +gridx) ) + (thread_row * C.stride + thread_col)] = Cvalue2; // Write result of (A2 * B) C.elements[(C.stride * BLOCK_SIZE * (block_row + gridy) + BLOCK_SIZE * block_col) + (thread_row * C.stride + thread_col)] = Cvalue3; // Write result of (A2 * B2) C.elements[(C.stride * BLOCK_SIZE * (block_row + gridy) + BLOCK_SIZE * (block_col +gridx)) + (thread_row * C.stride + thread_col)] = Cvalue4; }
f35e6e15dfa0cdceba466b183a1e6172c3559381.cu
/// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" #include <stdio.h> #ifndef FOOTPRINT_SIZE #define FOOTPRINT_SIZE BLOCK_SIZE #endif // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // grid Dimensions int gridx = gridDim.x; int gridy = gridDim.y; // Iterate up to max times int max = (A.width / BLOCK_SIZE); // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub float Cvalue1, Cvalue2, Cvalue3, Cvalue4; Cvalue1 = Cvalue2 = Cvalue3 = Cvalue4 = 0.0f; // Each thread computes one element of Csub in its copy of CValue for (int m = 0; m < max; ++m){ // 4 shared matrices that will be used to compute 4 values ( A * B ) ( A * B2 ) ( A2 * B ) (A2 * B2) __shared__ float shared_A[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_A2[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float shared_B2[BLOCK_SIZE][BLOCK_SIZE]; // Same as MatMultKernel00.cu except I removed Asub and Bsub. Looks more complex but it is the same thing. 
shared_A[thread_row][thread_col] = A.elements[(A.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * m)+(thread_row * A.stride + thread_col)]; shared_B[thread_row][thread_col] = B.elements[(B.stride * BLOCK_SIZE * m + BLOCK_SIZE * block_col)+(thread_row * B.stride + thread_col)]; // differs from shared_A, block_row is incremented by gridDim.y shared_A2[thread_row][thread_col] = A.elements[(A.stride * BLOCK_SIZE * (block_row +gridy)+ BLOCK_SIZE * m)+(thread_row * A.stride + thread_col)]; // differs from shared_B, block_col is incremented by gridDim.x shared_B2[thread_row][thread_col] = B.elements[(B.stride * BLOCK_SIZE * m + BLOCK_SIZE * (block_col + gridx))+(thread_row * B.stride + thread_col)]; // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue1 += shared_A[thread_row][e] * shared_B[e][thread_col]; } // Do an inproduct of one row of shared_A and one col of shared_B2 #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue2 += shared_A[thread_row][e] * shared_B2[e][thread_col]; } // Do an inproduct of one row of shared_A2 and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue3 += shared_A2[thread_row][e] * shared_B[e][thread_col]; } // Do an inproduct of one row of shared_A2 and one col of shared_B #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) { Cvalue4 += shared_A2[thread_row][e] * shared_B2[e][thread_col]; } // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A, shared_B, shared_A2, and shared_B2 BLOCKS __syncthreads(); } // Write to GLOBAL memory. // Each thread writes its own cell value. 
// Write result of (A * B) C.elements[(C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * block_col) + (thread_row * C.stride + thread_col)] = Cvalue1; // Write result of (A * B2) C.elements[(C.stride * BLOCK_SIZE * block_row + BLOCK_SIZE * (block_col +gridx) ) + (thread_row * C.stride + thread_col)] = Cvalue2; // Write result of (A2 * B) C.elements[(C.stride * BLOCK_SIZE * (block_row + gridy) + BLOCK_SIZE * block_col) + (thread_row * C.stride + thread_col)] = Cvalue3; // Write result of (A2 * B2) C.elements[(C.stride * BLOCK_SIZE * (block_row + gridy) + BLOCK_SIZE * (block_col +gridx)) + (thread_row * C.stride + thread_col)] = Cvalue4; }
381860e24e63de8294f8b537cedd6f001a4dc4e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgemv_fermi.cu normal z -> c, Fri Jul 18 17:34:13 2014 */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c #define num_threads 128 #define gemv_bs 32 #define threadSize 128 __global__ void cgemvn_kernel1_fermi( int m, int n, int n1, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; magmaFloatComplex res = MAGMA_C_ZERO; for( int i=0; i < n1; i += gemv_bs ) { #pragma unroll for(int j=0; j < gemv_bs; j++) { res += A[0] * x[j]; A += lda; } x += gemv_bs; } if ( n > n1 ) { for(int j=0; j < (n-n1); j++) { res += A[0] * x[j]; A += lda; } } if ( ind < m ) y[ind] = alpha * res + beta * y[ind]; #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void cgemvn_kernel2_fermi( int m, int n, int n1, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; x += threadIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex buff[num_threads]; for( int i=0; i < n1; i += num_threads ) { __syncthreads(); buff[threadIdx.x] = x[i]; __syncthreads(); #pragma unroll for(int j=0; j < num_threads; j++) { res += A[0]*buff[j]; A += lda; } } __syncthreads(); if ( n > n1 ) { buff[threadIdx.x] = x[n1]; __syncthreads(); for(int j=0; j<(n-n1); j++) { res += A[0]*buff[j]; A += lda; } } if ( ind < m ) y[ind] = alpha * res + beta * y[ind]; #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine 
computes Y = alpha A x + beta y, on the GPU. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension n. @param[in] beta REAL. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A X + beta Y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvn_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { magma_int_t blocks = (m - 1)/num_threads + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threads, 1, 1); /* if ( m <= 8500 ) cgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>> (m, n, (n / gemv_bs)*gemv_bs, alpha, A, lda, x, y); else */ hipLaunchKernelGGL(( cgemvn_kernel2_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, (n / num_threads)*num_threads, alpha, A, lda, x, beta, y); } __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, int n1, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) unsigned int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[threadSize]; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex c_zero = MAGMA_C_ZERO; for(int i=0; i < n1; i += threadSize) { res += A[tx + i + lda * blockIdx.y] * x[tx + i]; } if ( m > n1 ) { if ( tx + n1 < m ) { res += A[tx + n1 + lda*blockIdx.y] * x[tx + n1]; } else { res += c_zero; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s > 32; 
s /= 2) { if ( tx < s ) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if ( tx < 32 ) { sdata[tx] += sdata[tx + 32]; } if ( tx == 0 ) { for(int i=1; i < 32; i++) { sdata[tx] += sdata[tx + i]; } } if ( tx == 0 ) { if ( blockIdx.y < n ) { y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y]; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine computes y = alpha * A^T * x + beta*y, on the GPU. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension m. @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A^T X + beta Y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvt_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { dim3 grid ( 1, n, 1 ); dim3 threads ( threadSize, 1, 1 ); hipLaunchKernelGGL(( cgemvt_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, (m / threadSize) * threadSize, A, lda, x, beta, y ); } __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, int n1, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) unsigned int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[threadSize]; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex c_zero = MAGMA_C_ZERO; for(int i=0; i < n1; i += threadSize) { res += cuConjf(A[tx + i + lda * 
blockIdx.y]) * x[tx + i]; } if ( m > n1 ) { if ( tx + n1 < m ) { res += cuConjf(A[tx + n1 + lda*blockIdx.y]) * x[tx + n1]; } else { res += c_zero; } } sdata[tx] = res; __syncthreads(); /* if ( tx < 128 ) { sdata[tx] += sdata[tx + 128]; } __syncthreads(); */ if ( tx < 64 ) { sdata[tx] += sdata[tx + 64]; } __syncthreads(); if ( tx < 32 ) { sdata[tx] += sdata[tx + 32]; } if ( tx == 0 ) { for(int i=1; i < 32; i++) { sdata[tx] += sdata[tx + i]; } } if ( tx == 0 ) { if ( blockIdx.y < n ) { y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y]; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine computes y = alpha * A^H * x + beta*y, on the GPU. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension m. @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A^H X + beta y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvc_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { dim3 grid ( 1, n, 1 ); dim3 threads ( threadSize, 1, 1 ); hipLaunchKernelGGL(( cgemvc_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, (m / threadSize) * threadSize, A, lda, x, beta, y); } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. 
Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] y REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. 
@ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, A, lda, x, incx, beta, y, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( incx == 1 && incy == 1 ) { if ( trans == MagmaNoTrans ) { if ( m < 7000 ) { magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } else { magmablas_cgemvn_fermi(m, n, alpha, A, lda, x, beta, y); } } else if ( trans == MagmaTrans ) { magmablas_cgemvt_fermi(m, n, alpha, A, lda, x, beta, y); } else if ( trans == MagmaConjTrans ) { magmablas_cgemvc_fermi(m, n, alpha, A, lda, x, beta, y); } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } else { magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } } #undef num_threads #undef gemv_bs #undef threadSize
381860e24e63de8294f8b537cedd6f001a4dc4e5.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgemv_fermi.cu normal z -> c, Fri Jul 18 17:34:13 2014 */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c #define num_threads 128 #define gemv_bs 32 #define threadSize 128 __global__ void cgemvn_kernel1_fermi( int m, int n, int n1, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; magmaFloatComplex res = MAGMA_C_ZERO; for( int i=0; i < n1; i += gemv_bs ) { #pragma unroll for(int j=0; j < gemv_bs; j++) { res += A[0] * x[j]; A += lda; } x += gemv_bs; } if ( n > n1 ) { for(int j=0; j < (n-n1); j++) { res += A[0] * x[j]; A += lda; } } if ( ind < m ) y[ind] = alpha * res + beta * y[ind]; #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void cgemvn_kernel2_fermi( int m, int n, int n1, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*num_threads + threadIdx.x; A += ind; x += threadIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex buff[num_threads]; for( int i=0; i < n1; i += num_threads ) { __syncthreads(); buff[threadIdx.x] = x[i]; __syncthreads(); #pragma unroll for(int j=0; j < num_threads; j++) { res += A[0]*buff[j]; A += lda; } } __syncthreads(); if ( n > n1 ) { buff[threadIdx.x] = x[n1]; __syncthreads(); for(int j=0; j<(n-n1); j++) { res += A[0]*buff[j]; A += lda; } } if ( ind < m ) y[ind] = alpha * res + beta * y[ind]; #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine computes Y = alpha A x + beta y, on the GPU. @param[in] m INTEGER. 
On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension n. @param[in] beta REAL. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A X + beta Y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvn_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { magma_int_t blocks = (m - 1)/num_threads + 1; dim3 grid(blocks, 1, 1); dim3 threads(num_threads, 1, 1); /* if ( m <= 8500 ) cgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>> (m, n, (n / gemv_bs)*gemv_bs, alpha, A, lda, x, y); else */ cgemvn_kernel2_fermi<<< grid, threads, 0, magma_stream >>> (m, n, (n / num_threads)*num_threads, alpha, A, lda, x, beta, y); } __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, int n1, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) unsigned int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[threadSize]; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex c_zero = MAGMA_C_ZERO; for(int i=0; i < n1; i += threadSize) { res += A[tx + i + lda * blockIdx.y] * x[tx + i]; } if ( m > n1 ) { if ( tx + n1 < m ) { res += A[tx + n1 + lda*blockIdx.y] * x[tx + n1]; } else { res += c_zero; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s > 32; s /= 2) { if ( tx < s ) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if ( tx < 32 ) { 
sdata[tx] += sdata[tx + 32]; } if ( tx == 0 ) { for(int i=1; i < 32; i++) { sdata[tx] += sdata[tx + i]; } } if ( tx == 0 ) { if ( blockIdx.y < n ) { y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y]; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine computes y = alpha * A^T * x + beta*y, on the GPU. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension m. @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A^T X + beta Y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvt_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { dim3 grid ( 1, n, 1 ); dim3 threads ( threadSize, 1, 1 ); cgemvt_kernel_fermi<<< grid, threads, 0, magma_stream >>> (m, n, alpha, (m / threadSize) * threadSize, A, lda, x, beta, y ); } __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, int n1, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y) { #if (__CUDA_ARCH__ >= 200) unsigned int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[threadSize]; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex c_zero = MAGMA_C_ZERO; for(int i=0; i < n1; i += threadSize) { res += cuConjf(A[tx + i + lda * blockIdx.y]) * x[tx + i]; } if ( m > n1 ) { if ( tx + n1 < m ) { res += cuConjf(A[tx + n1 + lda*blockIdx.y]) * x[tx + 
n1]; } else { res += c_zero; } } sdata[tx] = res; __syncthreads(); /* if ( tx < 128 ) { sdata[tx] += sdata[tx + 128]; } __syncthreads(); */ if ( tx < 64 ) { sdata[tx] += sdata[tx + 64]; } __syncthreads(); if ( tx < 32 ) { sdata[tx] += sdata[tx + 32]; } if ( tx == 0 ) { for(int i=1; i < 32; i++) { sdata[tx] += sdata[tx + i]; } } if ( tx == 0 ) { if ( blockIdx.y < n ) { y[blockIdx.y] = sdata[0] * alpha + beta * y[blockIdx.y]; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- This routine computes y = alpha * A^H * x + beta*y, on the GPU. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix A. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A @param[in] alpha COMPLEX. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension m. @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. @param[out] y COMPLEX array of dimension n. On exit Y = alpha A^H X + beta y. @ingroup magma_cblas2_internal ********************************************************************/ extern "C" void magmablas_cgemvc_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magmaFloatComplex beta, magmaFloatComplex *y) { dim3 grid ( 1, n, 1 ); dim3 threads ( threadSize, 1, 1 ); cgemvc_kernel_fermi<<< grid, threads, 0, magma_stream >>> (m, n, alpha, (m / threadSize) * threadSize, A, lda, x, beta, y); } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. 
Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] x COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] y REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. 
@ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, A, lda, x, incx, beta, y, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( incx == 1 && incy == 1 ) { if ( trans == MagmaNoTrans ) { if ( m < 7000 ) { magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } else { magmablas_cgemvn_fermi(m, n, alpha, A, lda, x, beta, y); } } else if ( trans == MagmaTrans ) { magmablas_cgemvt_fermi(m, n, alpha, A, lda, x, beta, y); } else if ( trans == MagmaConjTrans ) { magmablas_cgemvc_fermi(m, n, alpha, A, lda, x, beta, y); } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } else { magma_cgemv( trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } } #undef num_threads #undef gemv_bs #undef threadSize
250f2d4e283d04880df1d1f702e8d9bf77260775.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/nvtx_utils.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <utilities/legacy/bit_util.cuh> #include <bitmask/legacy/bit_mask.cuh> #include <rmm/thrust_rmm_allocator.h> #include <cudf/cudf.h> #include <hipcub/hipcub.hpp> #include <memory> #include <stdio.h> #include <algorithm> #include <cudf/legacy/copying.hpp> #include <cudf/legacy/rolling.hpp> #include <rolling/legacy/rolling_detail.hpp> // allocate column #include <io/utilities/wrapper_utils.hpp> #include <jit/launcher.h> #include <jit/legacy/type.h> #include <jit/parser.h> #include "jit/code/code.h" #include "jit/util/type.h" #include <types.h.jit> #include <types.hpp.jit> #include <rmm/device_scalar.hpp> #include <cudf/detail/utilities/cuda.cuh> namespace { /** * @brief Computes the rolling window function * * @tparam ColumnType Datatype of values pointed to by the pointers * @tparam agg_op A functor that defines the aggregation operation * @tparam average Perform average across all valid elements in the window * @param nrows[in] Number of rows in input table * @param output_valid_count[in] Number of valid rows in the output * @param out_col[out] Pointers to pre-allocated output column's data * @param out_cols_valid[out] Pointers to the pre-allocated validity mask of * the output column * @param 
in_col[in] Pointers to input column's data * @param in_cols_valid[in] Pointers to the validity mask of the input column * @param window[in] The static rolling window size, accumulates from * in_col[i-window+1] to in_col[i] inclusive * @param min_periods[in] Minimum number of observations in window required to * have a value, otherwise 0 is stored in the valid bit mask * @param forward_window[in] The static rolling window size in the forward * direction, accumulates from in_col[i] to * in_col[i+forward_window] inclusive * @param[in] window_col The window size values, window_col[i] specifies window * size for element i. If window_col = NULL, then window is used as * the static window size for all elements * @param[in] min_periods_col The minimum number of observation values, * min_periods_col[i] specifies minimum number of observations for * element i. If min_periods_col = NULL, then min_periods is used as * the static value for all elements * @param[in] forward_window_col The forward window size values, * forward_window_col[i] specifies forward window size for element i. 
* If forward_window_col = NULL, then forward_window is used as the * static forward window size for all elements */ template <typename ColumnType, class agg_op, bool average, cudf::size_type block_size> __launch_bounds__ (block_size) __global__ void gpu_rolling(cudf::size_type nrows, gdf_size_type * __restrict__ const output_valid_count, ColumnType * const __restrict__ out_col, bit_mask::bit_mask_t * const __restrict__ out_col_valid, ColumnType const * const __restrict__ in_col, bit_mask::bit_mask_t const * const __restrict__ in_col_valid, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col) { // we're going to be using bit utils a lot in the kernel using namespace bit_mask; const bool is_nullable = (in_col_valid != nullptr); cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; cudf::size_type stride = blockDim.x * gridDim.x; agg_op op; gdf_size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffffffff, i < nrows); while(i < nrows) { ColumnType val = agg_op::template identity<ColumnType>(); volatile cudf::size_type count = 0; // declare this as volatile to avoid some compiler optimizations that lead to incorrect results for CUDA 10.0 and below (fixed in CUDA 10.1) // dynamic window handling if (window_col != nullptr) window = window_col[i]; if (min_periods_col != nullptr) min_periods = max(min_periods_col[i], 1); // at least one observation is required if (forward_window_col != nullptr) forward_window = forward_window_col[i]; // compute bounds cudf::size_type start_index = max((cudf::size_type)0, i - window + 1); cudf::size_type end_index = min(nrows, i + forward_window + 1); // exclusive // aggregate // TODO: We should explore using shared memory to avoid redundant loads. // This might require separating the kernel into a special version // for dynamic and static sizes. 
for (cudf::size_type j = start_index; j < end_index; j++) { if (!is_nullable || is_valid(in_col_valid, j)) { val = op(in_col[j], val); count++; } } // check if we have enough input samples bool output_is_valid = (count >= min_periods); // set the mask bit_mask_t const result_mask{__ballot_sync(active_threads, output_is_valid)}; cudf::size_type const out_mask_location = cudf::util::detail::bit_container_index<bit_mask_t, cudf::size_type>(i); // only one thread writes the mask if (0 == threadIdx.x % warpSize){ out_col_valid[out_mask_location] = result_mask; warp_valid_count += __popc(result_mask); } // store the output value, one per thread if (output_is_valid) cudf::detail::store_output_functor<ColumnType, average>{}(out_col[i], val, count); // process next element i += stride; active_threads = __ballot_sync(active_threads, i < nrows); } // sum the valid counts across the whole block gdf_size_type block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if(threadIdx.x == 0){ atomicAdd(output_valid_count, block_valid_count); } } struct rolling_window_launcher { /** * @brief Uses SFINAE to instantiate only for supported type combos */ template<typename ColumnType, class agg_op, bool average, class... TArgs, typename std::enable_if_t<cudf::detail::is_supported<ColumnType, agg_op>(), std::nullptr_t> = nullptr> void dispatch_aggregation_type(cudf::size_type nrows, gdf_size_type& null_count, hipStream_t stream, TArgs... 
FArgs) { cudf::nvtx::range_push("CUDF_ROLLING", cudf::nvtx::color::ORANGE); constexpr cudf::size_type block = 256; cudf::size_type grid = (nrows + block-1) / block; rmm::device_scalar<gdf_size_type> device_valid_count{0, stream}; hipLaunchKernelGGL(( gpu_rolling<ColumnType, agg_op, average, block>), dim3(grid), dim3(block), 0, stream, nrows, device_valid_count.data(), FArgs...); null_count = nrows - device_valid_count.value(); // check the stream for debugging CHECK_CUDA(stream); cudf::nvtx::range_pop(); } /** * @brief If we cannot perform aggregation on this type then throw an error */ template<typename ColumnType, class agg_op, bool average, class... TArgs, typename std::enable_if_t<!cudf::detail::is_supported<ColumnType, agg_op>(), std::nullptr_t> = nullptr> void dispatch_aggregation_type(cudf::size_type nrows, gdf_size_type& null_count, hipStream_t stream, TArgs... FArgs) { CUDF_FAIL("Unsupported column type/operation combo. Only `min` and `max` are supported for non-arithmetic types for aggregations."); } /** * @brief Helper function for gdf_rolling. Deduces the type of the * aggregation column and type and calls another function to invoke the * rolling window kernel. 
*/ template <typename ColumnType> void operator()(cudf::size_type nrows, gdf_size_type &null_count, gdf_agg_op agg_type, void *out_col_data_ptr, cudf::valid_type *out_col_valid_ptr, void *in_col_data_ptr, cudf::valid_type *in_col_valid_ptr, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col, hipStream_t stream) { ColumnType *typed_out_data = static_cast<ColumnType*>(out_col_data_ptr); bit_mask::bit_mask_t *typed_out_valid = reinterpret_cast<bit_mask::bit_mask_t*>(out_col_valid_ptr); const ColumnType *typed_in_data = static_cast<const ColumnType*>(in_col_data_ptr); const bit_mask::bit_mask_t *typed_in_valid = reinterpret_cast<const bit_mask::bit_mask_t*>(in_col_valid_ptr); // TODO: We should consolidate our aggregation enums for reductions, scans, // groupby and rolling. @harrism suggested creating // aggregate_dispatcher that works like type_dispatcher. 
switch (agg_type) { case GDF_SUM: dispatch_aggregation_type<ColumnType, cudf::DeviceSum, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MIN: dispatch_aggregation_type<ColumnType, cudf::DeviceMin, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MAX: dispatch_aggregation_type<ColumnType, cudf::DeviceMax, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_COUNT: dispatch_aggregation_type<ColumnType, cudf::DeviceCount, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_AVG: dispatch_aggregation_type<ColumnType, cudf::DeviceSum, true>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; default: // TODO: need a nice way to convert enums to strings, same would be useful for groupbys CUDF_FAIL("Aggregation function " + std::to_string(agg_type) + " is not implemented"); } } }; } // anonymous namespace namespace cudf { // see rolling.hpp for declaration gdf_column* rolling_window(const gdf_column &input_col, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, gdf_agg_op agg_type, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col) { CUDF_EXPECTS((window >= 0) && (min_periods >= 0) && (forward_window >= 0), "Window size and min periods must be 
non-negative"); // Use the column wrapper class from io/utilities to quickly create a column gdf_column_wrapper output_col(input_col.size, input_col.dtype, input_col.dtype_info, input_col.col_name == nullptr ? "" : std::string(input_col.col_name)); // If there are no rows in the input, return successfully if (input_col.size == 0) return output_col.release(); // Allocate memory for the output column output_col.allocate(); // At least one observation is required to procure a valid output min_periods = ::max(min_periods, 1); // always use the default stream for now hipStream_t stream = NULL; // Launch type dispatcher cudf::type_dispatcher(input_col.dtype, rolling_window_launcher{}, input_col.size, output_col->null_count, agg_type, output_col->data, output_col->valid, input_col.data, input_col.valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col, stream); // Release the gdf pointer from the wrapper class return output_col.release(); } gdf_column rolling_window(gdf_column const& input, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const std::string& user_defined_aggregator, gdf_agg_op agg_op, gdf_dtype output_type, cudf::size_type const* window_col, cudf::size_type const* min_periods_col, cudf::size_type const* forward_window_col) { CUDF_EXPECTS((window >= 0) && (min_periods >= 0) && (forward_window >= 0), "Window size and min periods must be non-negative"); gdf_column output = allocate_column(output_type, input.size, true); // If there are no rows in the input, return successfully if (input.size == 0) return output; if (input.null_count > 0) { CUDF_FAIL("Currently the UDF version of rolling window" " does NOT support inputs with nulls."); } // At least one observation is required to procure a valid output min_periods = ::max(min_periods, 1); std::string hash = "prog_rolling." 
+ std::to_string(std::hash<std::string>{}(user_defined_aggregator)); std::string cuda_source; switch(agg_op){ case GDF_NUMBA_GENERIC_AGG_OPS: cuda_source = cudf::jit::parse_single_function_ptx( user_defined_aggregator, cudf::rolling::jit::get_function_name(agg_op), cudf::jit::getTypeName(output_type), {0, 5} // {0, 5} means the first and sixth args are pointers. ) + cudf::rolling::jit::code::kernel; break; case GDF_CUDA_GENERIC_AGG_OPS: cuda_source = cudf::jit::parse_single_function_cuda( user_defined_aggregator, cudf::rolling::jit::get_function_name(agg_op) ) + cudf::rolling::jit::code::kernel; break; default: CUDF_FAIL("Unsupported UDF type."); } // Launch the jitify kernel cudf::jit::launcher( hash, cuda_source, { cudf::rolling::jit::code::operation_h , cudf_types_h, cudf_types_hpp }, { "-std=c++14" }, nullptr ).set_kernel_inst( "gpu_rolling", // name of the kernel we are launching { cudf::jit::getTypeName(output.dtype), // list of template arguments cudf::jit::getTypeName(input.dtype), cudf::rolling::jit::get_operator_name(agg_op) } ).launch( output.size, output.data, output.valid, input.data, input.valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col ); set_null_count(output); return output; } } // namespace cudf
250f2d4e283d04880df1d1f702e8d9bf77260775.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/nvtx_utils.hpp> #include <cudf/utilities/legacy/type_dispatcher.hpp> #include <utilities/legacy/bit_util.cuh> #include <bitmask/legacy/bit_mask.cuh> #include <rmm/thrust_rmm_allocator.h> #include <cudf/cudf.h> #include <cub/cub.cuh> #include <memory> #include <stdio.h> #include <algorithm> #include <cudf/legacy/copying.hpp> #include <cudf/legacy/rolling.hpp> #include <rolling/legacy/rolling_detail.hpp> // allocate column #include <io/utilities/wrapper_utils.hpp> #include <jit/launcher.h> #include <jit/legacy/type.h> #include <jit/parser.h> #include "jit/code/code.h" #include "jit/util/type.h" #include <types.h.jit> #include <types.hpp.jit> #include <rmm/device_scalar.hpp> #include <cudf/detail/utilities/cuda.cuh> namespace { /** * @brief Computes the rolling window function * * @tparam ColumnType Datatype of values pointed to by the pointers * @tparam agg_op A functor that defines the aggregation operation * @tparam average Perform average across all valid elements in the window * @param nrows[in] Number of rows in input table * @param output_valid_count[in] Number of valid rows in the output * @param out_col[out] Pointers to pre-allocated output column's data * @param out_cols_valid[out] Pointers to the pre-allocated validity mask of * the output column * @param in_col[in] Pointers to input column's data * @param in_cols_valid[in] Pointers to the validity mask 
of the input column * @param window[in] The static rolling window size, accumulates from * in_col[i-window+1] to in_col[i] inclusive * @param min_periods[in] Minimum number of observations in window required to * have a value, otherwise 0 is stored in the valid bit mask * @param forward_window[in] The static rolling window size in the forward * direction, accumulates from in_col[i] to * in_col[i+forward_window] inclusive * @param[in] window_col The window size values, window_col[i] specifies window * size for element i. If window_col = NULL, then window is used as * the static window size for all elements * @param[in] min_periods_col The minimum number of observation values, * min_periods_col[i] specifies minimum number of observations for * element i. If min_periods_col = NULL, then min_periods is used as * the static value for all elements * @param[in] forward_window_col The forward window size values, * forward_window_col[i] specifies forward window size for element i. * If forward_window_col = NULL, then forward_window is used as the * static forward window size for all elements */ template <typename ColumnType, class agg_op, bool average, cudf::size_type block_size> __launch_bounds__ (block_size) __global__ void gpu_rolling(cudf::size_type nrows, gdf_size_type * __restrict__ const output_valid_count, ColumnType * const __restrict__ out_col, bit_mask::bit_mask_t * const __restrict__ out_col_valid, ColumnType const * const __restrict__ in_col, bit_mask::bit_mask_t const * const __restrict__ in_col_valid, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col) { // we're going to be using bit utils a lot in the kernel using namespace bit_mask; const bool is_nullable = (in_col_valid != nullptr); cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; cudf::size_type stride = blockDim.x * gridDim.x; agg_op op; 
gdf_size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffffffff, i < nrows); while(i < nrows) { ColumnType val = agg_op::template identity<ColumnType>(); volatile cudf::size_type count = 0; // declare this as volatile to avoid some compiler optimizations that lead to incorrect results for CUDA 10.0 and below (fixed in CUDA 10.1) // dynamic window handling if (window_col != nullptr) window = window_col[i]; if (min_periods_col != nullptr) min_periods = max(min_periods_col[i], 1); // at least one observation is required if (forward_window_col != nullptr) forward_window = forward_window_col[i]; // compute bounds cudf::size_type start_index = max((cudf::size_type)0, i - window + 1); cudf::size_type end_index = min(nrows, i + forward_window + 1); // exclusive // aggregate // TODO: We should explore using shared memory to avoid redundant loads. // This might require separating the kernel into a special version // for dynamic and static sizes. for (cudf::size_type j = start_index; j < end_index; j++) { if (!is_nullable || is_valid(in_col_valid, j)) { val = op(in_col[j], val); count++; } } // check if we have enough input samples bool output_is_valid = (count >= min_periods); // set the mask bit_mask_t const result_mask{__ballot_sync(active_threads, output_is_valid)}; cudf::size_type const out_mask_location = cudf::util::detail::bit_container_index<bit_mask_t, cudf::size_type>(i); // only one thread writes the mask if (0 == threadIdx.x % warpSize){ out_col_valid[out_mask_location] = result_mask; warp_valid_count += __popc(result_mask); } // store the output value, one per thread if (output_is_valid) cudf::detail::store_output_functor<ColumnType, average>{}(out_col[i], val, count); // process next element i += stride; active_threads = __ballot_sync(active_threads, i < nrows); } // sum the valid counts across the whole block gdf_size_type block_valid_count = cudf::experimental::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); 
if(threadIdx.x == 0){ atomicAdd(output_valid_count, block_valid_count); } } struct rolling_window_launcher { /** * @brief Uses SFINAE to instantiate only for supported type combos */ template<typename ColumnType, class agg_op, bool average, class... TArgs, typename std::enable_if_t<cudf::detail::is_supported<ColumnType, agg_op>(), std::nullptr_t> = nullptr> void dispatch_aggregation_type(cudf::size_type nrows, gdf_size_type& null_count, cudaStream_t stream, TArgs... FArgs) { cudf::nvtx::range_push("CUDF_ROLLING", cudf::nvtx::color::ORANGE); constexpr cudf::size_type block = 256; cudf::size_type grid = (nrows + block-1) / block; rmm::device_scalar<gdf_size_type> device_valid_count{0, stream}; gpu_rolling<ColumnType, agg_op, average, block><<<grid, block, 0, stream>>>(nrows, device_valid_count.data(), FArgs...); null_count = nrows - device_valid_count.value(); // check the stream for debugging CHECK_CUDA(stream); cudf::nvtx::range_pop(); } /** * @brief If we cannot perform aggregation on this type then throw an error */ template<typename ColumnType, class agg_op, bool average, class... TArgs, typename std::enable_if_t<!cudf::detail::is_supported<ColumnType, agg_op>(), std::nullptr_t> = nullptr> void dispatch_aggregation_type(cudf::size_type nrows, gdf_size_type& null_count, cudaStream_t stream, TArgs... FArgs) { CUDF_FAIL("Unsupported column type/operation combo. Only `min` and `max` are supported for non-arithmetic types for aggregations."); } /** * @brief Helper function for gdf_rolling. Deduces the type of the * aggregation column and type and calls another function to invoke the * rolling window kernel. 
*/ template <typename ColumnType> void operator()(cudf::size_type nrows, gdf_size_type &null_count, gdf_agg_op agg_type, void *out_col_data_ptr, cudf::valid_type *out_col_valid_ptr, void *in_col_data_ptr, cudf::valid_type *in_col_valid_ptr, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col, cudaStream_t stream) { ColumnType *typed_out_data = static_cast<ColumnType*>(out_col_data_ptr); bit_mask::bit_mask_t *typed_out_valid = reinterpret_cast<bit_mask::bit_mask_t*>(out_col_valid_ptr); const ColumnType *typed_in_data = static_cast<const ColumnType*>(in_col_data_ptr); const bit_mask::bit_mask_t *typed_in_valid = reinterpret_cast<const bit_mask::bit_mask_t*>(in_col_valid_ptr); // TODO: We should consolidate our aggregation enums for reductions, scans, // groupby and rolling. @harrism suggested creating // aggregate_dispatcher that works like type_dispatcher. 
switch (agg_type) { case GDF_SUM: dispatch_aggregation_type<ColumnType, cudf::DeviceSum, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MIN: dispatch_aggregation_type<ColumnType, cudf::DeviceMin, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MAX: dispatch_aggregation_type<ColumnType, cudf::DeviceMax, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_COUNT: dispatch_aggregation_type<ColumnType, cudf::DeviceCount, false>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_AVG: dispatch_aggregation_type<ColumnType, cudf::DeviceSum, true>(nrows, null_count, stream, typed_out_data, typed_out_valid, typed_in_data, typed_in_valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; default: // TODO: need a nice way to convert enums to strings, same would be useful for groupbys CUDF_FAIL("Aggregation function " + std::to_string(agg_type) + " is not implemented"); } } }; } // anonymous namespace namespace cudf { // see rolling.hpp for declaration gdf_column* rolling_window(const gdf_column &input_col, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, gdf_agg_op agg_type, const cudf::size_type *window_col, const cudf::size_type *min_periods_col, const cudf::size_type *forward_window_col) { CUDF_EXPECTS((window >= 0) && (min_periods >= 0) && (forward_window >= 0), "Window size and min periods must be 
non-negative"); // Use the column wrapper class from io/utilities to quickly create a column gdf_column_wrapper output_col(input_col.size, input_col.dtype, input_col.dtype_info, input_col.col_name == nullptr ? "" : std::string(input_col.col_name)); // If there are no rows in the input, return successfully if (input_col.size == 0) return output_col.release(); // Allocate memory for the output column output_col.allocate(); // At least one observation is required to procure a valid output min_periods = std::max(min_periods, 1); // always use the default stream for now cudaStream_t stream = NULL; // Launch type dispatcher cudf::type_dispatcher(input_col.dtype, rolling_window_launcher{}, input_col.size, output_col->null_count, agg_type, output_col->data, output_col->valid, input_col.data, input_col.valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col, stream); // Release the gdf pointer from the wrapper class return output_col.release(); } gdf_column rolling_window(gdf_column const& input, cudf::size_type window, cudf::size_type min_periods, cudf::size_type forward_window, const std::string& user_defined_aggregator, gdf_agg_op agg_op, gdf_dtype output_type, cudf::size_type const* window_col, cudf::size_type const* min_periods_col, cudf::size_type const* forward_window_col) { CUDF_EXPECTS((window >= 0) && (min_periods >= 0) && (forward_window >= 0), "Window size and min periods must be non-negative"); gdf_column output = allocate_column(output_type, input.size, true); // If there are no rows in the input, return successfully if (input.size == 0) return output; if (input.null_count > 0) { CUDF_FAIL("Currently the UDF version of rolling window" " does NOT support inputs with nulls."); } // At least one observation is required to procure a valid output min_periods = std::max(min_periods, 1); std::string hash = "prog_rolling." 
+ std::to_string(std::hash<std::string>{}(user_defined_aggregator)); std::string cuda_source; switch(agg_op){ case GDF_NUMBA_GENERIC_AGG_OPS: cuda_source = cudf::jit::parse_single_function_ptx( user_defined_aggregator, cudf::rolling::jit::get_function_name(agg_op), cudf::jit::getTypeName(output_type), {0, 5} // {0, 5} means the first and sixth args are pointers. ) + cudf::rolling::jit::code::kernel; break; case GDF_CUDA_GENERIC_AGG_OPS: cuda_source = cudf::jit::parse_single_function_cuda( user_defined_aggregator, cudf::rolling::jit::get_function_name(agg_op) ) + cudf::rolling::jit::code::kernel; break; default: CUDF_FAIL("Unsupported UDF type."); } // Launch the jitify kernel cudf::jit::launcher( hash, cuda_source, { cudf::rolling::jit::code::operation_h , cudf_types_h, cudf_types_hpp }, { "-std=c++14" }, nullptr ).set_kernel_inst( "gpu_rolling", // name of the kernel we are launching { cudf::jit::getTypeName(output.dtype), // list of template arguments cudf::jit::getTypeName(input.dtype), cudf::rolling::jit::get_operator_name(agg_op) } ).launch( output.size, output.data, output.valid, input.data, input.valid, window, min_periods, forward_window, window_col, min_periods_col, forward_window_col ); set_null_count(output); return output; } } // namespace cudf
1a7f105471ee4e7923bf115a830e7b0f9e9e1fbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Kernel that computes the gradient of an image, being the gradient * the difference between the neighbour pixels and the central pixel * of a cluster. */ __global__ void d_Gradient(float *ptrInputImage, float *ptrGradientImage, int Nx, int Ny, int Nz, int Kx, int Ky, int Kz) { int i, j, k, linearIndex; int Kradius_x, Kradius_y, Kradius_z; //Kradius_x = Kx/2; Kradius_y = Ky/2; Kradius_z = Kz/2; float output = 0, voxelValue = 0; // The blocks are larger than the voxel to be processed to load the edges of the kernel int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int z = threadIdx.z + blockDim.z*blockIdx.z; // Check if inside the image if((y>=Ny)||(x>=Nx)||(z>=Nz)||(y<0)||(x<0)||(z<0)) { return; } linearIndex = y + x*Ny + z*Nx*Ny; // col-wise stored matrix // Get the voxel value: voxelValue = ptrInputImage[linearIndex]; // Process only the voxels inside the processing window if((y>=Kradius_y)&&(x>=Kradius_x)&&(z>=Kradius_z)&&(y<(Ny-Ky))&&(x<Nx-Kradius_x)&&(z<Nz-Kradius_z)) { #pragma unroll for(i = 0; i < Kx; i++) { #pragma unroll for(j = 0; j < Ky; j++) { #pragma unroll for(k = 0; k < Kz; k++) { // Sum of differences linearIndex = (y-Kradius_y+j) + (x-Kradius_x+i)*Ny + (z-Kradius_z+k)*Ny*Nz; output += ptrInputImage[linearIndex]-voxelValue; } } } ptrGradientImage[linearIndex] = output; } }
1a7f105471ee4e7923bf115a830e7b0f9e9e1fbd.cu
/* Kernel that computes the gradient of an image, being the gradient * the difference between the neighbour pixels and the central pixel * of a cluster. */ __global__ void d_Gradient(float *ptrInputImage, float *ptrGradientImage, int Nx, int Ny, int Nz, int Kx, int Ky, int Kz) { int i, j, k, linearIndex; int Kradius_x, Kradius_y, Kradius_z; //Kradius_x = Kx/2; Kradius_y = Ky/2; Kradius_z = Kz/2; float output = 0, voxelValue = 0; // The blocks are larger than the voxel to be processed to load the edges of the kernel int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int z = threadIdx.z + blockDim.z*blockIdx.z; // Check if inside the image if((y>=Ny)||(x>=Nx)||(z>=Nz)||(y<0)||(x<0)||(z<0)) { return; } linearIndex = y + x*Ny + z*Nx*Ny; // col-wise stored matrix // Get the voxel value: voxelValue = ptrInputImage[linearIndex]; // Process only the voxels inside the processing window if((y>=Kradius_y)&&(x>=Kradius_x)&&(z>=Kradius_z)&&(y<(Ny-Ky))&&(x<Nx-Kradius_x)&&(z<Nz-Kradius_z)) { #pragma unroll for(i = 0; i < Kx; i++) { #pragma unroll for(j = 0; j < Ky; j++) { #pragma unroll for(k = 0; k < Kz; k++) { // Sum of differences linearIndex = (y-Kradius_y+j) + (x-Kradius_x+i)*Ny + (z-Kradius_z+k)*Ny*Nz; output += ptrInputImage[linearIndex]-voxelValue; } } } ptrGradientImage[linearIndex] = output; } }
a15b4f9d4e5720d8272fd80fe04e9b16e483cba7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/check_valid_impl.cuh" template <typename T, typename S> __global__ void CheckValidKernel(const size_t size, const T *box, const T *img_metas, S *valid) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; S valid_flag = false; valid_flag |= !(box[left_x] >= static_cast<T>(0.0)); valid_flag |= !(box[left_y] >= static_cast<T>(0.0)); valid_flag |= !(img_metas[1] * img_metas[2] - static_cast<T>(1.0) >= box[right_x]); valid_flag |= !(img_metas[0] * img_metas[2] - static_cast<T>(1.0) >= box[right_y]); valid[i] = !valid_flag; } return; } template <typename S> __global__ void CheckValidKernel(const size_t size, const unsigned char *box, const unsigned char *img_metas, S *valid) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; S valid_flag = false; valid_flag |= !(img_metas[0] * img_metas[2] >= box[right_x] + 1); valid_flag |= !(img_metas[1] * img_metas[2] >= box[right_y] + 1); valid[i] = !valid_flag; } return; } template <typename T, typename S> void CheckValid(const 
size_t &size, const T *box, const T *img_metas, S *valid, hipStream_t cuda_stream) { hipLaunchKernelGGL(( CheckValidKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, box, img_metas, valid); } template void CheckValid(const size_t &size, const float *box, const float *img_metas, bool *valid, hipStream_t cuda_stream); template void CheckValid(const size_t &size, const half *box, const half *img_metas, bool *valid, hipStream_t cuda_stream); template void CheckValid(const size_t &size, const short *box, const short *img_metas, bool *valid, // NOLINT hipStream_t cuda_stream); template void CheckValid(const size_t &size, const unsigned char *box, const unsigned char *img_metas, bool *valid, hipStream_t cuda_stream);
a15b4f9d4e5720d8272fd80fe04e9b16e483cba7.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/check_valid_impl.cuh" template <typename T, typename S> __global__ void CheckValidKernel(const size_t size, const T *box, const T *img_metas, S *valid) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t left_x = i * 4; const size_t left_y = i * 4 + 1; const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; S valid_flag = false; valid_flag |= !(box[left_x] >= static_cast<T>(0.0)); valid_flag |= !(box[left_y] >= static_cast<T>(0.0)); valid_flag |= !(img_metas[1] * img_metas[2] - static_cast<T>(1.0) >= box[right_x]); valid_flag |= !(img_metas[0] * img_metas[2] - static_cast<T>(1.0) >= box[right_y]); valid[i] = !valid_flag; } return; } template <typename S> __global__ void CheckValidKernel(const size_t size, const unsigned char *box, const unsigned char *img_metas, S *valid) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { const size_t right_x = i * 4 + 2; const size_t right_y = i * 4 + 3; S valid_flag = false; valid_flag |= !(img_metas[0] * img_metas[2] >= box[right_x] + 1); valid_flag |= !(img_metas[1] * img_metas[2] >= box[right_y] + 1); valid[i] = !valid_flag; } return; } template <typename T, typename S> void CheckValid(const size_t &size, const T *box, const T *img_metas, S *valid, cudaStream_t cuda_stream) { 
CheckValidKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, box, img_metas, valid); } template void CheckValid(const size_t &size, const float *box, const float *img_metas, bool *valid, cudaStream_t cuda_stream); template void CheckValid(const size_t &size, const half *box, const half *img_metas, bool *valid, cudaStream_t cuda_stream); template void CheckValid(const size_t &size, const short *box, const short *img_metas, bool *valid, // NOLINT cudaStream_t cuda_stream); template void CheckValid(const size_t &size, const unsigned char *box, const unsigned char *img_metas, bool *valid, cudaStream_t cuda_stream);
6ad115e27ea08511130707b7ac2c0510ca1d6d53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_4_front; int xdim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_4_front; int ydim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_4_front; int xdim1_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_4_front; int ydim1_update_halo_kernel2_zvel_minus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_4_front*(y)+xdim0_update_halo_kernel2_zvel_minus_4_front*ydim0_update_halo_kernel2_zvel_minus_4_front*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_4_front*(y)+xdim1_update_halo_kernel2_zvel_minus_4_front*ydim1_update_halo_kernel2_zvel_minus_4_front*(z)) //user function __device__ inline void update_halo_kernel2_zvel_minus_4_front_gpu(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,-4)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,-4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_4_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_minus_4_front + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_minus_4_front * ydim0_update_halo_kernel2_zvel_minus_4_front; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_minus_4_front + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_minus_4_front * 
ydim1_update_halo_kernel2_zvel_minus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_minus_4_front_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_minus_4_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,57)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_4_front"); OPS_kernels[57].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != 
xdim0_update_halo_kernel2_zvel_minus_4_front_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_front_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_front_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_front_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_4_front, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_minus_4_front_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_4_front, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_minus_4_front_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_4_front, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_minus_4_front_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_4_front, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_minus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[57].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_4_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[57].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[57].mpi_time += t2-t1; OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 57; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 57; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_4_front_execute; if (OPS_diags > 1) { ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_4_front"); } ops_enqueue_kernel(desc); } #endif
6ad115e27ea08511130707b7ac2c0510ca1d6d53.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_minus_4_front; int xdim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_minus_4_front; int ydim0_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_minus_4_front; int xdim1_update_halo_kernel2_zvel_minus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_minus_4_front; int ydim1_update_halo_kernel2_zvel_minus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_4_front*(y)+xdim0_update_halo_kernel2_zvel_minus_4_front*ydim0_update_halo_kernel2_zvel_minus_4_front*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_4_front*(y)+xdim1_update_halo_kernel2_zvel_minus_4_front*ydim1_update_halo_kernel2_zvel_minus_4_front*(z)) //user function __device__ inline void update_halo_kernel2_zvel_minus_4_front_gpu(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,-4)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,-4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_minus_4_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_minus_4_front + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_minus_4_front * ydim0_update_halo_kernel2_zvel_minus_4_front; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_minus_4_front + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_minus_4_front * ydim1_update_halo_kernel2_zvel_minus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { 
update_halo_kernel2_zvel_minus_4_front_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_zvel_minus_4_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,57)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_4_front"); OPS_kernels[57].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_front_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_front_h || xdim1 != 
xdim1_update_halo_kernel2_zvel_minus_4_front_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_front_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_4_front, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_minus_4_front_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_4_front, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_minus_4_front_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_4_front, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_minus_4_front_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_4_front, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_minus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[57].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel2_zvel_minus_4_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[57].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[57].mpi_time += t2-t1; OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_zvel_minus_4_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = 
name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 57; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 57; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_4_front_execute; if (OPS_diags > 1) { ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_4_front"); } ops_enqueue_kernel(desc); } #endif
f0ac7f7abc7aa2172abf7a91358081255dd56a2a.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHNumerics.cuh> #include <THH/THH.h> #include <hip/hip_runtime.h> /** * Each block will handle one channel of each image **/ template <typename T> __global__ void HorizFlipImagesAndBoxes( const int N, const int C, const int H, const int W, const T* img_in, float* bboxes, const int* offsets, const float p, const float* flip, T* img_out, const bool nhwc) { // early return if not flipping if (flip[blockIdx.x] < p) return; // pointer offset into images const int img_offset = blockIdx.x * C * H * W; const T* img = &img_in[img_offset]; T* img_o = &img_out[img_offset]; // flip bboxes auto bbox_offset_begin = offsets[blockIdx.x]; auto bbox_offset_end = offsets[blockIdx.x + 1]; auto num_bboxes = bbox_offset_end - bbox_offset_begin; const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x; // bboxes in ltrb format, scaled to [0, 1] for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) { float *bbox = &bboxes[(bbox_offset_begin + thread_idx) * 4]; // Could do this inplace, but not register constrained auto bbox_0 = bbox[0]; auto bbox_2 = bbox[2]; bbox[0] = 1. 
- bbox_2; bbox[2] = 1. - bbox_0; } if (nhwc) { // loop over float3 pixels, handle 3 values / thread for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { const T* img_hw = &img[h * W * C + w * C]; T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C]; for (int c = 0; c < C; ++c) { img_out_hw[c] = img_hw[c]; } } } } else { // loop over channels for (int c = 0; c < C; ++c) { const T* img_c = &img[c * H * W]; T *img_out_c = &img_o[c * H * W]; // handle tiles of (h, w) at a time for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { const int input_idx = h * W + w; const int output_idx = h * W + (W - 1 - w); img_out_c[output_idx] = img_c[input_idx]; } } } } } /** * Take images and their bboxes, randomly flip on horizontal axis * In/Out: img: NCHW tensor of N, C-channel images of constant (H, W) * In/Out: bboxes: [N_i, 4] tensor of original bboxes in ltrb format * In: bbox_offsets: [N] offset values into bboxes * In: p \in [0, 1): probability of flipping each (img, bbox) pair * In: nhwc: Tensor in NHWC format * ---- * Note: allocate temp memory, but effectively do this inplace */ std::vector<at::Tensor> random_horiz_flip( at::Tensor& img, at::Tensor& bboxes, const at::Tensor& bbox_offsets, const float p, const bool nhwc) { // dimensions const int N = img.size(0); int C, H, W; if (nhwc) { C = img.size(3); H = img.size(1); W = img.size(2); } else { C = img.size(1); H = img.size(2); W = img.size(3); } assert(img.type().is_cuda()); assert(bboxes.type().is_cuda()); assert(bbox_offsets.type().is_cuda()); // printf("%d %d %d %d\n", N, C, H, W); // Need temp storage of size img at::Tensor tmp_img = img.clone(); at::Tensor flip = at::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( img.type(), "HorizFlipImagesAndBoxes", [&] { hipLaunchKernelGGL(( HorizFlipImagesAndBoxes<scalar_t>), 
dim3(N), dim3(dim3(16, 16)), 0, stream.stream(), N, C, H, W, img.data<scalar_t>(), bboxes.data<float>(), bbox_offsets.data<int>(), p, flip.data<float>(), tmp_img.data<scalar_t>(), nhwc); THCudaCheck(hipGetLastError()); }); // copy tmp_img -> img // img = tmp_img; return {tmp_img, bboxes}; }
f0ac7f7abc7aa2172abf7a91358081255dd56a2a.cu
/****************************************************************************** * * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCNumerics.cuh> #include <THC/THC.h> #include <cuda.h> /** * Each block will handle one channel of each image **/ template <typename T> __global__ void HorizFlipImagesAndBoxes( const int N, const int C, const int H, const int W, const T* img_in, float* bboxes, const int* offsets, const float p, const float* flip, T* img_out, const bool nhwc) { // early return if not flipping if (flip[blockIdx.x] < p) return; // pointer offset into images const int img_offset = blockIdx.x * C * H * W; const T* img = &img_in[img_offset]; T* img_o = &img_out[img_offset]; // flip bboxes auto bbox_offset_begin = offsets[blockIdx.x]; auto bbox_offset_end = offsets[blockIdx.x + 1]; auto num_bboxes = bbox_offset_end - bbox_offset_begin; const int thread_idx = threadIdx.y * blockDim.x + threadIdx.x; // bboxes in ltrb format, scaled to [0, 1] for (int i = thread_idx; i < num_bboxes; i += blockDim.x * blockDim.y) { float *bbox = &bboxes[(bbox_offset_begin + thread_idx) * 4]; // Could do this inplace, but not register constrained auto bbox_0 = bbox[0]; auto bbox_2 = bbox[2]; bbox[0] = 1. - bbox_2; bbox[2] = 1. 
- bbox_0; } if (nhwc) { // loop over float3 pixels, handle 3 values / thread for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { const T* img_hw = &img[h * W * C + w * C]; T * img_out_hw = &img_o[h * W * C + (W - 1 - w) * C]; for (int c = 0; c < C; ++c) { img_out_hw[c] = img_hw[c]; } } } } else { // loop over channels for (int c = 0; c < C; ++c) { const T* img_c = &img[c * H * W]; T *img_out_c = &img_o[c * H * W]; // handle tiles of (h, w) at a time for (int h = threadIdx.y; h < H; h += blockDim.y) { for (int w = threadIdx.x; w < W; w += blockDim.x) { const int input_idx = h * W + w; const int output_idx = h * W + (W - 1 - w); img_out_c[output_idx] = img_c[input_idx]; } } } } } /** * Take images and their bboxes, randomly flip on horizontal axis * In/Out: img: NCHW tensor of N, C-channel images of constant (H, W) * In/Out: bboxes: [N_i, 4] tensor of original bboxes in ltrb format * In: bbox_offsets: [N] offset values into bboxes * In: p \in [0, 1): probability of flipping each (img, bbox) pair * In: nhwc: Tensor in NHWC format * ---- * Note: allocate temp memory, but effectively do this inplace */ std::vector<at::Tensor> random_horiz_flip( at::Tensor& img, at::Tensor& bboxes, const at::Tensor& bbox_offsets, const float p, const bool nhwc) { // dimensions const int N = img.size(0); int C, H, W; if (nhwc) { C = img.size(3); H = img.size(1); W = img.size(2); } else { C = img.size(1); H = img.size(2); W = img.size(3); } assert(img.type().is_cuda()); assert(bboxes.type().is_cuda()); assert(bbox_offsets.type().is_cuda()); // printf("%d %d %d %d\n", N, C, H, W); // Need temp storage of size img at::Tensor tmp_img = img.clone(); at::Tensor flip = at::zeros({N}, at::CUDA(at::kFloat)).uniform_(0., 1.); auto stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( img.type(), "HorizFlipImagesAndBoxes", [&] { HorizFlipImagesAndBoxes<scalar_t><<<N, dim3(16, 16), 0, stream.stream()>>>( N, C, H, W, 
img.data<scalar_t>(), bboxes.data<float>(), bbox_offsets.data<int>(), p, flip.data<float>(), tmp_img.data<scalar_t>(), nhwc); THCudaCheck(cudaGetLastError()); }); // copy tmp_img -> img // img = tmp_img; return {tmp_img, bboxes}; }
e78d1559132bee92f620b4d612b1562bc0facb0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } }
e78d1559132bee92f620b4d612b1562bc0facb0b.cu
#include "includes.h" using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } }
45f71803d0e235083a699a1af749e768f41f22c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include <helper_math.h> #include <float.h> // for FLT_MAX #include "CudaMath.h" #include "dds.h" #include "permutations.h" // Definitions #define INPUT_IMAGE "lena_std.ppm" #define REFERENCE_IMAGE "lena_ref.dds" #define ERROR_THRESHOLD 0.02f //#define NUM_THREADS 64 // Number of threads per block. //#define NUM_THREADS 128 // Number of threads per block. #define NUM_THREADS 256 // Number of threads per block. //#define NUM_THREADS 512 // Number of threads per block. #define __debugsync() template <class T> __device__ inline void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //__constant__ float3 kColorMetric = { 0.2126f, 0.7152f, 0.0722f }; __constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f }; //////////////////////////////////////////////////////////////////////////////// // Sort colors //////////////////////////////////////////////////////////////////////////////// __device__ void sortColors(const float *values, int *ranks) { const int tid = threadIdx.x; int rank = 0; #pragma unroll for (int i = 0; i < 16; i++) { rank += (values[i] < values[tid]); } ranks[tid] = rank; // Resolve elements with the same index. 
for (int i = 0; i < 15; i++) { if (tid > i && ranks[tid] == ranks[i]) { ++ranks[tid]; } } } //////////////////////////////////////////////////////////////////////////////// // Load color block to shared mem //////////////////////////////////////////////////////////////////////////////// __device__ void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset) { const int bid = blockIdx.x + blockOffset; const int idx = threadIdx.x; __shared__ float dps[16]; float3 tmp; if (idx < 16) { // Read color and copy to shared mem. uint c = image[(bid) * 16 + idx]; colors[idx].x = ((c >> 0) & 0xFF) * (1.0f / 255.0f); colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f); colors[idx].z = ((c >> 16) & 0xFF) * (1.0f / 255.0f); // Sort colors along the best fit line. colorSums(colors, sums); float3 axis = bestFitLine(colors, sums[0]); dps[idx] = dot(colors[idx], axis); sortColors(dps, xrefs); tmp = colors[idx]; colors[xrefs[idx]] = tmp; } } //////////////////////////////////////////////////////////////////////////////// // Round color to RGB565 and expand //////////////////////////////////////////////////////////////////////////////// inline __device__ float3 roundAndExpand(float3 v, ushort *w) { v.x = rintf(__saturatef(v.x) * 31.0f); v.y = rintf(__saturatef(v.y) * 63.0f); v.z = rintf(__saturatef(v.z) * 31.0f); *w = ((ushort)v.x << 11) | ((ushort)v.y << 5) | (ushort)v.z; v.x *= 0.03227752766457f; // approximate integer bit expansion. 
v.y *= 0.01583151765563f; v.z *= 0.03227752766457f; return v; } __constant__ float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f }; __constant__ float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f }; __constant__ const int prods4[4] = { 0x090000,0x000900,0x040102,0x010402 }; __constant__ const int prods3[4] = { 0x040000,0x000400,0x040101,0x010401 }; #define USE_TABLES 1 //////////////////////////////////////////////////////////////////////////////// // Evaluate permutations //////////////////////////////////////////////////////////////////////////////// static __device__ float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable4[bits & 3] * colors[i]; akku += prods4[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (9.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = (1 + beta) * (1.0f / 3.0f); } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif // alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them. 
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.111111111111f) * dot(e, kColorMetric); } static __device__ float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable3[bits & 3] * colors[i]; akku += prods3[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (4.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = 0.5f; } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... 
a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.25f) * dot(e, kColorMetric); } __device__ void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum) { const int idx = threadIdx.x; float bestError = FLT_MAX; __shared__ uint s_permutations[160]; for (int i = 0; i < 16; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 992) { break; } ushort start, end; uint permutation = permutations[pidx]; if (pidx < 160) { s_permutations[pidx] = permutation; } float error = evalPermutation4(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; } } if (bestStart < bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= 0x55555555; // Flip indices. } __syncthreads(); // Sync here to ensure s_permutations is valid going forward for (int i = 0; i < 3; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 160) { break; } ushort start, end; uint permutation = s_permutations[pidx]; float error = evalPermutation3(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; if (bestStart > bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. 
} } } errors[idx] = bestError; } //////////////////////////////////////////////////////////////////////////////// // Find index with minimum error //////////////////////////////////////////////////////////////////////////////// __device__ int findMinError(float *errors) { const int idx = threadIdx.x; __shared__ int indices[NUM_THREADS]; indices[idx] = idx; __syncthreads(); for (int d = NUM_THREADS/2; d > 0; d >>= 1) { float err0 = errors[idx]; float err1 = (idx + d) < NUM_THREADS ? errors[idx + d] : FLT_MAX; int index1 = (idx + d) < NUM_THREADS ? indices[idx + d] : 0; __syncthreads(); if (err1 < err0) { errors[idx] = err1; indices[idx] = index1; } __syncthreads(); } return indices[0]; } //////////////////////////////////////////////////////////////////////////////// // Save DXT block //////////////////////////////////////////////////////////////////////////////// __device__ void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset) { const int bid = blockIdx.x + blockOffset; if (start == end) { permutation = 0; } // Reorder permutation. uint indices = 0; for (int i = 0; i < 16; i++) { int ref = xrefs[i]; indices |= ((permutation >> (2 * ref)) & 3) << (2 * i); } // Write endpoints. result[bid].x = (end << 16) | start; // Write palette indices. 
result[bid].y = indices; } //////////////////////////////////////////////////////////////////////////////// // Compress color block //////////////////////////////////////////////////////////////////////////////// __global__ void compress(const uint *permutations, const uint *image, uint2 *result, int blockOffset) { const int idx = threadIdx.x; __shared__ float3 colors[16]; __shared__ float3 sums[16]; __shared__ int xrefs[16]; loadColorBlock(image, colors, sums, xrefs, blockOffset); __syncthreads(); ushort bestStart, bestEnd; uint bestPermutation; __shared__ float errors[NUM_THREADS]; evalAllPermutations(colors, permutations, bestStart, bestEnd, bestPermutation, errors, sums[0]); // Use a parallel reduction to find minimum error. const int minIdx = findMinError(errors); __syncthreads(); // Only write the result of the winner thread. if (idx == minIdx) { saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result, blockOffset); } } // Helper structs and functions to validate the output of the compressor. // We cannot simply do a bitwise compare, because different compilers produce different // results for different targets due to floating point arithmetic. union Color32 { struct { unsigned char b, g, r, a; }; unsigned int u; }; union Color16 { struct { unsigned short b : 5; unsigned short g : 6; unsigned short r : 5; }; unsigned short u; }; struct BlockDXT1 { Color16 col0; Color16 col1; union { unsigned char row[4]; unsigned int indices; }; void decompress(Color32 colors[16]) const; }; void BlockDXT1::decompress(Color32 *colors) const { Color32 palette[4]; // Does bit expansion before interpolation. 
palette[0].b = (col0.b << 3) | (col0.b >> 2); palette[0].g = (col0.g << 2) | (col0.g >> 4); palette[0].r = (col0.r << 3) | (col0.r >> 2); palette[0].a = 0xFF; palette[1].r = (col1.r << 3) | (col1.r >> 2); palette[1].g = (col1.g << 2) | (col1.g >> 4); palette[1].b = (col1.b << 3) | (col1.b >> 2); palette[1].a = 0xFF; if (col0.u > col1.u) { // Four-color block: derive the other two colors. palette[2].r = (2 * palette[0].r + palette[1].r) / 3; palette[2].g = (2 * palette[0].g + palette[1].g) / 3; palette[2].b = (2 * palette[0].b + palette[1].b) / 3; palette[2].a = 0xFF; palette[3].r = (2 * palette[1].r + palette[0].r) / 3; palette[3].g = (2 * palette[1].g + palette[0].g) / 3; palette[3].b = (2 * palette[1].b + palette[0].b) / 3; palette[3].a = 0xFF; } else { // Three-color block: derive the other color. palette[2].r = (palette[0].r + palette[1].r) / 2; palette[2].g = (palette[0].g + palette[1].g) / 2; palette[2].b = (palette[0].b + palette[1].b) / 2; palette[2].a = 0xFF; palette[3].r = 0x00; palette[3].g = 0x00; palette[3].b = 0x00; palette[3].a = 0x00; } for (int i = 0; i < 16; i++) { colors[i] = palette[(indices >> (2*i)) & 0x3]; } } static int compareColors(const Color32 *b0, const Color32 *b1) { int sum = 0; for (int i = 0; i < 16; i++) { int r = (b0[i].r - b1[i].r); int g = (b0[i].g - b1[i].g); int b = (b0[i].b - b1[i].b); sum += r*r + g*g + b*b; } return sum; } static int compareBlock(const BlockDXT1 *b0, const BlockDXT1 *b1) { Color32 colors0[16]; Color32 colors1[16]; if (memcmp(b0, b1, sizeof(BlockDXT1)) == 0) { return 0; } else { b0->decompress(colors0); b1->decompress(colors1); return compareColors(colors0, colors1); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s 
findCudaDevice(argc, (const char **)argv); // Load input image. unsigned char *data = NULL; uint W, H; char *image_path = sdkFindFilePath(INPUT_IMAGE, argv[0]); if (image_path == 0) { printf("Error, unable to find source image <%s>\n", image_path); exit(EXIT_FAILURE); } if (!sdkLoadPPM4ub(image_path, &data, &W, &H)) { printf("Error, unable to open source image file <%s>\n", image_path); exit(EXIT_FAILURE); } uint w = W, h = H; printf("Image Loaded '%s', %d x %d pixels\n\n", image_path, w, h); // Allocate input image. const uint memSize = w * h * 4; assert(0 != memSize); uint *block_image = (uint *)malloc(memSize); // Convert linear image to block linear. for (uint by = 0; by < h/4; by++) { for (uint bx = 0; bx < w/4; bx++) { for (int i = 0; i < 16; i++) { const int x = i & 3; const int y = i / 4; block_image[(by * w/4 + bx) * 16 + i] = ((uint *)data)[(by * 4 + y) * 4 * (W/4) + bx * 4 + x]; } } } // copy into global mem uint *d_data = NULL; checkCudaErrors(hipMalloc((void **) &d_data, memSize)); // Result uint *d_result = NULL; const uint compressedSize = (w / 4) * (h / 4) * 8; checkCudaErrors(hipMalloc((void **)&d_result, compressedSize)); uint *h_result = (uint *)malloc(compressedSize); // Compute permutations. uint permutations[1024]; computePermutations(permutations); // Copy permutations host to devie. 
uint *d_permutations = NULL; checkCudaErrors(hipMalloc((void **) &d_permutations, 1024 * sizeof(uint))); checkCudaErrors(hipMemcpy(d_permutations, permutations, 1024 * sizeof(uint), hipMemcpyHostToDevice)); // create a timer StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); // Copy image from host to device checkCudaErrors(hipMemcpy(d_data, block_image, memSize, hipMemcpyHostToDevice)); // Determine launch configuration and run timed computation numIterations times uint blocks = ((w + 3) / 4) * ((h + 3) / 4); // rounds up by 1 block in each dim if %4 != 0 int devID; hipDeviceProp_t deviceProp; // get number of SMs on this GPU checkCudaErrors(hipGetDevice(&devID)); checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID)); // Restrict the numbers of blocks to launch on low end GPUs to avoid kernel timeout int blocksPerLaunch = min(blocks, 768 * deviceProp.multiProcessorCount); printf("Running DXT Compression on %u x %u image...\n", w, h); printf("\n%u Blocks, %u Threads per Block, %u Threads in Grid...\n\n", blocks, NUM_THREADS, blocks * NUM_THREADS); int numIterations = 1; for (int i = -1; i < numIterations; ++i) { if (i == 0) { checkCudaErrors(hipDeviceSynchronize()); sdkStartTimer(&timer); } for (int j=0; j<(int)blocks; j+=blocksPerLaunch) { hipLaunchKernelGGL(( compress), dim3(min(blocksPerLaunch, blocks-j)), dim3(NUM_THREADS), 0, 0, d_permutations, d_data, (uint2 *)d_result, j); } } getLastCudaError("compress"); // sync to host, stop timer, record perf checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&timer); double dAvgTime = 1.0e-3 * sdkGetTimerValue(&timer)/(double)numIterations; printf("dxtc, Throughput = %.4f MPixels/s, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %d\n", (1.0e-6 * (double)(W * H)/ dAvgTime), dAvgTime, (W * H), 1, NUM_THREADS); // copy result data from device to host checkCudaErrors(hipMemcpy(h_result, d_result, compressedSize, hipMemcpyDeviceToHost)); // Write out result data to DDS file char 
output_filename[1024]; strcpy(output_filename, image_path); strcpy(output_filename + strlen(image_path) - 3, "dds"); FILE *fp = fopen(output_filename, "wb"); if (fp == 0) { printf("Error, unable to open output image <%s>\n", output_filename); exit(EXIT_FAILURE); } DDSHeader header; header.fourcc = FOURCC_DDS; header.size = 124; header.flags = (DDSD_WIDTH|DDSD_HEIGHT|DDSD_CAPS|DDSD_PIXELFORMAT|DDSD_LINEARSIZE); header.height = h; header.width = w; header.pitch = compressedSize; header.depth = 0; header.mipmapcount = 0; memset(header.reserved, 0, sizeof(header.reserved)); header.pf.size = 32; header.pf.flags = DDPF_FOURCC; header.pf.fourcc = FOURCC_DXT1; header.pf.bitcount = 0; header.pf.rmask = 0; header.pf.gmask = 0; header.pf.bmask = 0; header.pf.amask = 0; header.caps.caps1 = DDSCAPS_TEXTURE; header.caps.caps2 = 0; header.caps.caps3 = 0; header.caps.caps4 = 0; header.notused = 0; fwrite(&header, sizeof(DDSHeader), 1, fp); fwrite(h_result, compressedSize, 1, fp); fclose(fp); // Make sure the generated image is correct. 
const char *reference_image_path = sdkFindFilePath(REFERENCE_IMAGE, argv[0]); if (reference_image_path == 0) { printf("Error, unable to find reference image\n"); exit(EXIT_FAILURE); } fp = fopen(reference_image_path, "rb"); if (fp == 0) { printf("Error, unable to open reference image\n"); exit(EXIT_FAILURE); } fseek(fp, sizeof(DDSHeader), SEEK_SET); uint referenceSize = (W / 4) * (H / 4) * 8; uint *reference = (uint *)malloc(referenceSize); fread(reference, referenceSize, 1, fp); fclose(fp); printf("\nChecking accuracy...\n"); float rms = 0; for (uint y = 0; y < h; y += 4) { for (uint x = 0; x < w; x += 4) { uint referenceBlockIdx = ((y/4) * (W/4) + (x/4)); uint resultBlockIdx = ((y/4) * (w/4) + (x/4)); int cmp = compareBlock(((BlockDXT1 *)h_result) + resultBlockIdx, ((BlockDXT1 *)reference) + referenceBlockIdx); if (cmp != 0.0f) { printf("Deviation at (%4d,%4d):\t%f rms\n", x/4, y/4, float(cmp)/16/3); } rms += cmp; } } rms /= w * h * 3; // Free allocated resources and exit checkCudaErrors(hipFree(d_permutations)); checkCudaErrors(hipFree(d_data)); checkCudaErrors(hipFree(d_result)); free(image_path); free(data); free(block_image); free(h_result); free(reference); sdkDeleteTimer(&timer); printf("RMS(reference, result) = %f\n\n", rms); printf(rms <= ERROR_THRESHOLD ? "Test passed\n" : "Test failed!\n"); /* Return zero if test passed, one otherwise */ return rms > ERROR_THRESHOLD; }
45f71803d0e235083a699a1af749e768f41f22c2.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Utilities and system includes #include <helper_functions.h> #include <helper_cuda.h> #include <helper_math.h> #include <float.h> // for FLT_MAX #include "CudaMath.h" #include "dds.h" #include "permutations.h" // Definitions #define INPUT_IMAGE "lena_std.ppm" #define REFERENCE_IMAGE "lena_ref.dds" #define ERROR_THRESHOLD 0.02f //#define NUM_THREADS 64 // Number of threads per block. //#define NUM_THREADS 128 // Number of threads per block. #define NUM_THREADS 256 // Number of threads per block. //#define NUM_THREADS 512 // Number of threads per block. #define __debugsync() template <class T> __device__ inline void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //__constant__ float3 kColorMetric = { 0.2126f, 0.7152f, 0.0722f }; __constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f }; //////////////////////////////////////////////////////////////////////////////// // Sort colors //////////////////////////////////////////////////////////////////////////////// __device__ void sortColors(const float *values, int *ranks) { const int tid = threadIdx.x; int rank = 0; #pragma unroll for (int i = 0; i < 16; i++) { rank += (values[i] < values[tid]); } ranks[tid] = rank; // Resolve elements with the same index. 
for (int i = 0; i < 15; i++) { if (tid > i && ranks[tid] == ranks[i]) { ++ranks[tid]; } } } //////////////////////////////////////////////////////////////////////////////// // Load color block to shared mem //////////////////////////////////////////////////////////////////////////////// __device__ void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset) { const int bid = blockIdx.x + blockOffset; const int idx = threadIdx.x; __shared__ float dps[16]; float3 tmp; if (idx < 16) { // Read color and copy to shared mem. uint c = image[(bid) * 16 + idx]; colors[idx].x = ((c >> 0) & 0xFF) * (1.0f / 255.0f); colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f); colors[idx].z = ((c >> 16) & 0xFF) * (1.0f / 255.0f); // Sort colors along the best fit line. colorSums(colors, sums); float3 axis = bestFitLine(colors, sums[0]); dps[idx] = dot(colors[idx], axis); sortColors(dps, xrefs); tmp = colors[idx]; colors[xrefs[idx]] = tmp; } } //////////////////////////////////////////////////////////////////////////////// // Round color to RGB565 and expand //////////////////////////////////////////////////////////////////////////////// inline __device__ float3 roundAndExpand(float3 v, ushort *w) { v.x = rintf(__saturatef(v.x) * 31.0f); v.y = rintf(__saturatef(v.y) * 63.0f); v.z = rintf(__saturatef(v.z) * 31.0f); *w = ((ushort)v.x << 11) | ((ushort)v.y << 5) | (ushort)v.z; v.x *= 0.03227752766457f; // approximate integer bit expansion. 
v.y *= 0.01583151765563f; v.z *= 0.03227752766457f; return v; } __constant__ float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f }; __constant__ float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f }; __constant__ const int prods4[4] = { 0x090000,0x000900,0x040102,0x010402 }; __constant__ const int prods3[4] = { 0x040000,0x000400,0x040101,0x010401 }; #define USE_TABLES 1 //////////////////////////////////////////////////////////////////////////////// // Evaluate permutations //////////////////////////////////////////////////////////////////////////////// static __device__ float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable4[bits & 3] * colors[i]; akku += prods4[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (9.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = (1 + beta) * (1.0f / 3.0f); } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif // alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them. 
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.111111111111f) * dot(e, kColorMetric); } static __device__ float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable3[bits & 3] * colors[i]; akku += prods3[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (4.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = 0.5f; } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... 
a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.25f) * dot(e, kColorMetric); } __device__ void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum) { const int idx = threadIdx.x; float bestError = FLT_MAX; __shared__ uint s_permutations[160]; for (int i = 0; i < 16; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 992) { break; } ushort start, end; uint permutation = permutations[pidx]; if (pidx < 160) { s_permutations[pidx] = permutation; } float error = evalPermutation4(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; } } if (bestStart < bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= 0x55555555; // Flip indices. } __syncthreads(); // Sync here to ensure s_permutations is valid going forward for (int i = 0; i < 3; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 160) { break; } ushort start, end; uint permutation = s_permutations[pidx]; float error = evalPermutation3(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; if (bestStart > bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. 
} } } errors[idx] = bestError; } //////////////////////////////////////////////////////////////////////////////// // Find index with minimum error //////////////////////////////////////////////////////////////////////////////// __device__ int findMinError(float *errors) { const int idx = threadIdx.x; __shared__ int indices[NUM_THREADS]; indices[idx] = idx; __syncthreads(); for (int d = NUM_THREADS/2; d > 0; d >>= 1) { float err0 = errors[idx]; float err1 = (idx + d) < NUM_THREADS ? errors[idx + d] : FLT_MAX; int index1 = (idx + d) < NUM_THREADS ? indices[idx + d] : 0; __syncthreads(); if (err1 < err0) { errors[idx] = err1; indices[idx] = index1; } __syncthreads(); } return indices[0]; } //////////////////////////////////////////////////////////////////////////////// // Save DXT block //////////////////////////////////////////////////////////////////////////////// __device__ void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset) { const int bid = blockIdx.x + blockOffset; if (start == end) { permutation = 0; } // Reorder permutation. uint indices = 0; for (int i = 0; i < 16; i++) { int ref = xrefs[i]; indices |= ((permutation >> (2 * ref)) & 3) << (2 * i); } // Write endpoints. result[bid].x = (end << 16) | start; // Write palette indices. 
result[bid].y = indices; } //////////////////////////////////////////////////////////////////////////////// // Compress color block //////////////////////////////////////////////////////////////////////////////// __global__ void compress(const uint *permutations, const uint *image, uint2 *result, int blockOffset) { const int idx = threadIdx.x; __shared__ float3 colors[16]; __shared__ float3 sums[16]; __shared__ int xrefs[16]; loadColorBlock(image, colors, sums, xrefs, blockOffset); __syncthreads(); ushort bestStart, bestEnd; uint bestPermutation; __shared__ float errors[NUM_THREADS]; evalAllPermutations(colors, permutations, bestStart, bestEnd, bestPermutation, errors, sums[0]); // Use a parallel reduction to find minimum error. const int minIdx = findMinError(errors); __syncthreads(); // Only write the result of the winner thread. if (idx == minIdx) { saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result, blockOffset); } } // Helper structs and functions to validate the output of the compressor. // We cannot simply do a bitwise compare, because different compilers produce different // results for different targets due to floating point arithmetic. union Color32 { struct { unsigned char b, g, r, a; }; unsigned int u; }; union Color16 { struct { unsigned short b : 5; unsigned short g : 6; unsigned short r : 5; }; unsigned short u; }; struct BlockDXT1 { Color16 col0; Color16 col1; union { unsigned char row[4]; unsigned int indices; }; void decompress(Color32 colors[16]) const; }; void BlockDXT1::decompress(Color32 *colors) const { Color32 palette[4]; // Does bit expansion before interpolation. 
palette[0].b = (col0.b << 3) | (col0.b >> 2); palette[0].g = (col0.g << 2) | (col0.g >> 4); palette[0].r = (col0.r << 3) | (col0.r >> 2); palette[0].a = 0xFF; palette[1].r = (col1.r << 3) | (col1.r >> 2); palette[1].g = (col1.g << 2) | (col1.g >> 4); palette[1].b = (col1.b << 3) | (col1.b >> 2); palette[1].a = 0xFF; if (col0.u > col1.u) { // Four-color block: derive the other two colors. palette[2].r = (2 * palette[0].r + palette[1].r) / 3; palette[2].g = (2 * palette[0].g + palette[1].g) / 3; palette[2].b = (2 * palette[0].b + palette[1].b) / 3; palette[2].a = 0xFF; palette[3].r = (2 * palette[1].r + palette[0].r) / 3; palette[3].g = (2 * palette[1].g + palette[0].g) / 3; palette[3].b = (2 * palette[1].b + palette[0].b) / 3; palette[3].a = 0xFF; } else { // Three-color block: derive the other color. palette[2].r = (palette[0].r + palette[1].r) / 2; palette[2].g = (palette[0].g + palette[1].g) / 2; palette[2].b = (palette[0].b + palette[1].b) / 2; palette[2].a = 0xFF; palette[3].r = 0x00; palette[3].g = 0x00; palette[3].b = 0x00; palette[3].a = 0x00; } for (int i = 0; i < 16; i++) { colors[i] = palette[(indices >> (2*i)) & 0x3]; } } static int compareColors(const Color32 *b0, const Color32 *b1) { int sum = 0; for (int i = 0; i < 16; i++) { int r = (b0[i].r - b1[i].r); int g = (b0[i].g - b1[i].g); int b = (b0[i].b - b1[i].b); sum += r*r + g*g + b*b; } return sum; } static int compareBlock(const BlockDXT1 *b0, const BlockDXT1 *b1) { Color32 colors0[16]; Color32 colors1[16]; if (memcmp(b0, b1, sizeof(BlockDXT1)) == 0) { return 0; } else { b0->decompress(colors0); b1->decompress(colors1); return compareColors(colors0, colors1); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("%s Starting...\n\n", argv[0]); // use command-line specified CUDA device, otherwise use device with highest Gflops/s 
findCudaDevice(argc, (const char **)argv); // Load input image. unsigned char *data = NULL; uint W, H; char *image_path = sdkFindFilePath(INPUT_IMAGE, argv[0]); if (image_path == 0) { printf("Error, unable to find source image <%s>\n", image_path); exit(EXIT_FAILURE); } if (!sdkLoadPPM4ub(image_path, &data, &W, &H)) { printf("Error, unable to open source image file <%s>\n", image_path); exit(EXIT_FAILURE); } uint w = W, h = H; printf("Image Loaded '%s', %d x %d pixels\n\n", image_path, w, h); // Allocate input image. const uint memSize = w * h * 4; assert(0 != memSize); uint *block_image = (uint *)malloc(memSize); // Convert linear image to block linear. for (uint by = 0; by < h/4; by++) { for (uint bx = 0; bx < w/4; bx++) { for (int i = 0; i < 16; i++) { const int x = i & 3; const int y = i / 4; block_image[(by * w/4 + bx) * 16 + i] = ((uint *)data)[(by * 4 + y) * 4 * (W/4) + bx * 4 + x]; } } } // copy into global mem uint *d_data = NULL; checkCudaErrors(cudaMalloc((void **) &d_data, memSize)); // Result uint *d_result = NULL; const uint compressedSize = (w / 4) * (h / 4) * 8; checkCudaErrors(cudaMalloc((void **)&d_result, compressedSize)); uint *h_result = (uint *)malloc(compressedSize); // Compute permutations. uint permutations[1024]; computePermutations(permutations); // Copy permutations host to devie. 
uint *d_permutations = NULL; checkCudaErrors(cudaMalloc((void **) &d_permutations, 1024 * sizeof(uint))); checkCudaErrors(cudaMemcpy(d_permutations, permutations, 1024 * sizeof(uint), cudaMemcpyHostToDevice)); // create a timer StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); // Copy image from host to device checkCudaErrors(cudaMemcpy(d_data, block_image, memSize, cudaMemcpyHostToDevice)); // Determine launch configuration and run timed computation numIterations times uint blocks = ((w + 3) / 4) * ((h + 3) / 4); // rounds up by 1 block in each dim if %4 != 0 int devID; cudaDeviceProp deviceProp; // get number of SMs on this GPU checkCudaErrors(cudaGetDevice(&devID)); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); // Restrict the numbers of blocks to launch on low end GPUs to avoid kernel timeout int blocksPerLaunch = min(blocks, 768 * deviceProp.multiProcessorCount); printf("Running DXT Compression on %u x %u image...\n", w, h); printf("\n%u Blocks, %u Threads per Block, %u Threads in Grid...\n\n", blocks, NUM_THREADS, blocks * NUM_THREADS); int numIterations = 1; for (int i = -1; i < numIterations; ++i) { if (i == 0) { checkCudaErrors(cudaDeviceSynchronize()); sdkStartTimer(&timer); } for (int j=0; j<(int)blocks; j+=blocksPerLaunch) { compress<<<min(blocksPerLaunch, blocks-j), NUM_THREADS>>>(d_permutations, d_data, (uint2 *)d_result, j); } } getLastCudaError("compress"); // sync to host, stop timer, record perf checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&timer); double dAvgTime = 1.0e-3 * sdkGetTimerValue(&timer)/(double)numIterations; printf("dxtc, Throughput = %.4f MPixels/s, Time = %.5f s, Size = %u Pixels, NumDevsUsed = %i, Workgroup = %d\n", (1.0e-6 * (double)(W * H)/ dAvgTime), dAvgTime, (W * H), 1, NUM_THREADS); // copy result data from device to host checkCudaErrors(cudaMemcpy(h_result, d_result, compressedSize, cudaMemcpyDeviceToHost)); // Write out result data to DDS file char output_filename[1024]; 
strcpy(output_filename, image_path); strcpy(output_filename + strlen(image_path) - 3, "dds"); FILE *fp = fopen(output_filename, "wb"); if (fp == 0) { printf("Error, unable to open output image <%s>\n", output_filename); exit(EXIT_FAILURE); } DDSHeader header; header.fourcc = FOURCC_DDS; header.size = 124; header.flags = (DDSD_WIDTH|DDSD_HEIGHT|DDSD_CAPS|DDSD_PIXELFORMAT|DDSD_LINEARSIZE); header.height = h; header.width = w; header.pitch = compressedSize; header.depth = 0; header.mipmapcount = 0; memset(header.reserved, 0, sizeof(header.reserved)); header.pf.size = 32; header.pf.flags = DDPF_FOURCC; header.pf.fourcc = FOURCC_DXT1; header.pf.bitcount = 0; header.pf.rmask = 0; header.pf.gmask = 0; header.pf.bmask = 0; header.pf.amask = 0; header.caps.caps1 = DDSCAPS_TEXTURE; header.caps.caps2 = 0; header.caps.caps3 = 0; header.caps.caps4 = 0; header.notused = 0; fwrite(&header, sizeof(DDSHeader), 1, fp); fwrite(h_result, compressedSize, 1, fp); fclose(fp); // Make sure the generated image is correct. 
const char *reference_image_path = sdkFindFilePath(REFERENCE_IMAGE, argv[0]); if (reference_image_path == 0) { printf("Error, unable to find reference image\n"); exit(EXIT_FAILURE); } fp = fopen(reference_image_path, "rb"); if (fp == 0) { printf("Error, unable to open reference image\n"); exit(EXIT_FAILURE); } fseek(fp, sizeof(DDSHeader), SEEK_SET); uint referenceSize = (W / 4) * (H / 4) * 8; uint *reference = (uint *)malloc(referenceSize); fread(reference, referenceSize, 1, fp); fclose(fp); printf("\nChecking accuracy...\n"); float rms = 0; for (uint y = 0; y < h; y += 4) { for (uint x = 0; x < w; x += 4) { uint referenceBlockIdx = ((y/4) * (W/4) + (x/4)); uint resultBlockIdx = ((y/4) * (w/4) + (x/4)); int cmp = compareBlock(((BlockDXT1 *)h_result) + resultBlockIdx, ((BlockDXT1 *)reference) + referenceBlockIdx); if (cmp != 0.0f) { printf("Deviation at (%4d,%4d):\t%f rms\n", x/4, y/4, float(cmp)/16/3); } rms += cmp; } } rms /= w * h * 3; // Free allocated resources and exit checkCudaErrors(cudaFree(d_permutations)); checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFree(d_result)); free(image_path); free(data); free(block_image); free(h_result); free(reference); sdkDeleteTimer(&timer); printf("RMS(reference, result) = %f\n\n", rms); printf(rms <= ERROR_THRESHOLD ? "Test passed\n" : "Test failed!\n"); /* Return zero if test passed, one otherwise */ return rms > ERROR_THRESHOLD; }
5c98272f546a37adfdee10d464a6d3946502e5fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { 
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> 
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), 
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = min(max(a[index], min_val), max_val); } } template <> void caffe_gpu_bound<float>(const int N, const float* a, const float min_val, const float max_val, float* 
y) { // NOLINT_NEXT_LINE(*) hipLaunchKernelGGL(( bound_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, min_val, max_val, y); } template <> void caffe_gpu_bound<double>(const int N, const double* a, const double min_val, const double max_val, double* y) { // NOLINT_NEXT_LINE(*) hipLaunchKernelGGL(( bound_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, min_val, max_val, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> 
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
5c98272f546a37adfdee10d464a6d3946502e5fb.cu
#include <algorithm> #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include <cstdlib> #include <cstring> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { 
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // 
NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template 
<typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val, const Dtype max_val, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = min(max(a[index], min_val), max_val); } } template <> void caffe_gpu_bound<float>(const int N, const float* a, const float min_val, const float max_val, float* y) { // NOLINT_NEXT_LINE(*) bound_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, min_val, max_val, y); } template <> void caffe_gpu_bound<double>(const int N, const double* a, const double min_val, const double max_val, double* y) { // NOLINT_NEXT_LINE(*) bound_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, min_val, max_val, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) 
{ CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); __global__ void popc_kernel(const int n, const float* a, const float* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popc(static_cast<uint32_t>(a[index]) ^ static_cast<uint32_t>(b[index])); } } __global__ void popcll_kernel(const int n, const double* a, const double* b, uint8_t* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = __popcll(static_cast<uint64_t>(a[index]) ^ static_cast<uint64_t>(b[index])); } } template <> uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x, const float* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), (uint32_t) 0, thrust::plus<uint32_t>()); } template <> uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x, const double* y) { // TODO: Fix caffe_gpu_hamming_distance (see failing unit test // TestHammingDistanceGPU in test_math_functions.cpp). 
NOT_IMPLEMENTED; thrust::device_vector<uint8_t> popcounts(n); // NOLINT_NEXT_LINE(whitespace/operators) popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>( n, x, y, thrust::raw_pointer_cast(popcounts.data())); return thrust::reduce(popcounts.begin(), popcounts.end(), /* NOLINT_NEXT_LINE(build/include_what_you_use) */ (uint32_t) 0, thrust::plus<uint32_t>()); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
44f1241a3f3d501af6956db62e0023453b79263d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, problme de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diffrents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w, h); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; int s = TID; int i; int j; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
44f1241a3f3d501af6956db62e0023453b79263d.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, probl�me de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diff�rents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w, h); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; int s = TID; int i; int j; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
53635923237d3529757447c25d5d143f88ae5551.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #include "convolve.h" #include <stdint.h> } // http://developer.download.nvidia.com/assets/cuda/files/convolutionSeparable.pdf /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension); note: BLOCK_W must be >= (MTX_S / 2) to fill apron * BLOCK_H: block height (block y dimension) * STEPS: number of convolution steps performed (number of output pixels written) by each thread */ #define DECL_CU_CONVOLUTION_ROW(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS) \ __global__ void convol_row_k_##USR##MTX_S(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w) \ { \ __shared__ IN_T bl_d[(BLOCK_H)][((STEPS) + 2) * (BLOCK_W)]; \ \ /* offset to left edge of apron */ \ const int off_x = (blockIdx.x * (STEPS) - 1) * (BLOCK_W) + threadIdx.x; \ const int off_y = blockIdx.y * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* left apron */ \ bl_d[threadIdx.y][threadIdx.x] = (off_x >= 0) ? in[0] : in[-off_x]; \ \ /* main data */ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) \ bl_d[threadIdx.y][threadIdx.x + i * (BLOCK_W)] = in[i * (BLOCK_W)]; \ \ /* right apron */ \ bl_d[threadIdx.y][threadIdx.x + ((STEPS) + 1) * (BLOCK_W)] = (img_w - off_x > ((STEPS) + 1) * (BLOCK_W)) ? 
\ in[((STEPS) + 1) * (BLOCK_W)] : in[img_w - off_x - 1]; \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + j] * bl_d[threadIdx.y][threadIdx.x + i * (BLOCK_W) + j]; \ \ out[i * (BLOCK_W)] = sum / div; \ } \ } \ \ static void cu_convolve_row_##USR##MTX_S(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / ((BLOCK_W) * (STEPS)), img_h / (BLOCK_H)); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ hipLaunchKernelGGL(( convol_row_k_##USR##MTX_S), dim3(blocks), dim3(threads), 0, 0, (IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w); \ } /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension) * BLOCK_H: block height (block y dimension); note: BLOCK_H must be >= (MTX_S / 2) to fill apron * STEPS: number of convolution steps performed (number of output pixels written) by each thread */ #define DECL_CU_CONVOLUTION_COL(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS) \ __global__ void convol_col_k_##USR##MTX_S(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w, int img_h) \ { \ __shared__ IN_T bl_d[(BLOCK_W)][((STEPS) + 2) * (BLOCK_H)]; /* +1 */ \ \ /* offset to upper edge of apron */ \ const int off_x = blockIdx.x * (BLOCK_W) + threadIdx.x; \ const int off_y = (blockIdx.y * (STEPS) - 1) * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* upper apron */ \ bl_d[threadIdx.x][threadIdx.y] = (off_y >= 0) ? 
in[0] : in[-off_y * img_w]; \ \ /* main data */ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) \ bl_d[threadIdx.x][threadIdx.y + i * (BLOCK_H)] = in[i * (BLOCK_H) * img_w]; \ \ /* lower apron */ \ bl_d[threadIdx.x][threadIdx.y + ((STEPS) + 1) * (BLOCK_H)] = (img_h - off_y > ((STEPS) + 1) * (BLOCK_H)) ? \ in[((STEPS) + 1) * (BLOCK_H) * img_w] : in[(img_h - off_y - 1) * img_w]; \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + j] * bl_d[threadIdx.x][threadIdx.y + i * (BLOCK_H) + j]; \ \ out[i * (BLOCK_H) * img_w] = sum / div; \ } \ } \ \ static void cu_convolve_col_##USR##MTX_S(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / (BLOCK_W), img_h / ((BLOCK_H) * (STEPS))); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ hipLaunchKernelGGL(( convol_col_k_##USR##MTX_S), dim3(blocks), dim3(threads), 0, 0, (IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w, img_h);\ } #define WARP_SIZE 32 DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 3, 1, (WARP_SIZE / 1), 8) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 3, (WARP_SIZE / 1), 1, 8) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 5, 2, (WARP_SIZE / 2), 8) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 5, (WARP_SIZE / 2), 2, 8) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 7, 4, (WARP_SIZE / 4), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 7, (WARP_SIZE / 4), 4, 4) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 9, 4, (WARP_SIZE / 4), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 9, (WARP_SIZE / 4), 4, 4) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 11, 8, (WARP_SIZE / 8), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 11, (WARP_SIZE / 8), 8, 4) __constant__ int gauss_mtx_3[3] = { 1, 2, 1 }; __constant__ int gauss_mtx_5[5] = { 1, 
4, 6, 4, 1 }; __constant__ int gauss_mtx_7[7] = { 1, 6, 15, 20, 15, 6, 1 }; __constant__ int gauss_mtx_9[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 }; __constant__ int gauss_mtx_11[11] = { 1, 10, 45, 120, 210, 252, 210, 120, 45, 10, 1 }; extern "C" int cu_gauss_filter(int rad, int img_w, int img_h, void *gm_in, void *gm_out, void *gm_tmp) { void *mtx; switch (rad) { case 3: hipGetSymbolAddress(&mtx, gauss_mtx_3); cu_convolve_row_gauss3(gm_in, gm_tmp, mtx, 4, img_w, img_h); cu_convolve_col_gauss3(gm_tmp, gm_out, mtx, 4, img_w, img_h); break; case 5: hipGetSymbolAddress(&mtx, gauss_mtx_5); cu_convolve_row_gauss5(gm_in, gm_tmp, mtx, 16, img_w, img_h); cu_convolve_col_gauss5(gm_tmp, gm_out, mtx, 16, img_w, img_h); break; case 7: hipGetSymbolAddress(&mtx, gauss_mtx_7); cu_convolve_row_gauss7(gm_in, gm_tmp, mtx, 64, img_w, img_h); cu_convolve_col_gauss7(gm_tmp, gm_out, mtx, 64, img_w, img_h); break; case 9: hipGetSymbolAddress(&mtx, gauss_mtx_9); cu_convolve_row_gauss9(gm_in, gm_tmp, mtx, 256, img_w, img_h); cu_convolve_col_gauss9(gm_tmp, gm_out, mtx, 256, img_w, img_h); break; case 11: hipGetSymbolAddress(&mtx, gauss_mtx_11); cu_convolve_row_gauss11(gm_in, gm_tmp, mtx, 1024, img_w, img_h); cu_convolve_col_gauss11(gm_tmp, gm_out, mtx, 1024, img_w, img_h); break; default: return -1; } return 0; } DECL_CU_CONVOLUTION_ROW(gaussf, float, float, float, 11, 6, (WARP_SIZE / 6), 4) DECL_CU_CONVOLUTION_COL(gaussf, float, float, float, 11, (WARP_SIZE / 6), 6, 4) __constant__ float gauss_mtx_f11[11] = { 1.0, 10.0, 45.0, 120.0, 210.0, 252.0, 210.0, 120.0, 45.0, 10.0, 1.0 }; extern "C" void cu_gauss_filter_f11(int img_w, int img_h, void *gm_in, void *gm_out, void *gm_tmp) { void *mtx; hipGetSymbolAddress(&mtx, gauss_mtx_f11); cu_convolve_row_gaussf11(gm_in, gm_tmp, mtx, 1024.0, img_w, img_h); cu_convolve_col_gaussf11(gm_tmp, gm_out, mtx, 1024.0, img_w, img_h); } DECL_CU_CONVOLUTION_ROW(sobel, uint8_t, int16_t, int, 3, 1, (WARP_SIZE / 1), 8); DECL_CU_CONVOLUTION_COL(sobel, int16_t, 
int16_t, int, 3, (WARP_SIZE / 1), 1, 8); __constant__ int sobel_mtx_1[3] = { -1, 0, 1 }; __constant__ int sobel_mtx_2[3] = { 1, 2, 1 }; extern "C" int cu_sobel_filter(int img_w, int img_h, void *gm_in, void *gm_hori, void *gm_vert, void *gm_tmp) { void *mtx1, *mtx2; hipGetSymbolAddress(&mtx1, sobel_mtx_1); hipGetSymbolAddress(&mtx2, sobel_mtx_2); cu_convolve_row_sobel3(gm_in, gm_tmp, mtx1, 1, img_w, img_h); cu_convolve_col_sobel3(gm_tmp, gm_hori, mtx2, 1, img_w, img_h); cu_convolve_row_sobel3(gm_in, gm_tmp, mtx2, 1, img_w, img_h); cu_convolve_col_sobel3(gm_tmp, gm_vert, mtx1, 1, img_w, img_h); return 0; } /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension); note: BLOCK_W must be >= (MTX_S / 2) to fill apron * BLOCK_H: block height (block y dimension); note: BLOCK_H must be >= (MTX_S / 2) to fill apron * STEPS_X: number of convolution steps performed (number of output pixels written) by each thread in x direction * STEPS_Y: number of convolution steps performed (number of output pixels written) by each thread in y direction */ #define DECL_CU_CONVOLUTION(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS_X, STEPS_Y) \ __global__ void convol_kernel_##USR(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w, int img_h) \ { \ __shared__ IN_T bl_d[((STEPS_X) + 2) * (BLOCK_W)][((STEPS_Y) + 2) * (BLOCK_H)]; \ \ /* offset to upper left corner of apron */ \ const int off_x = (blockIdx.x * (STEPS_X) - 1) * (BLOCK_W) + threadIdx.x; \ const int off_y = (blockIdx.y * (STEPS_Y) - 1) * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* upper and lower apron */ \ { \ IN_T *ua_in = in - img_w * ((off_y >= 0) ? 0 : off_y); \ IN_T *la_in = in + img_w * ((img_h - off_y > ((STEPS_Y) + 1) * (BLOCK_H)) ? 
((STEPS_Y) + 1) * (BLOCK_H) : (img_h - off_y - 1)); \ \ /* upper left and lower left apron */ \ bl_d[threadIdx.x][threadIdx.y] = ua_in[(off_x >= 0) ? 0 : -off_x]; \ bl_d[threadIdx.x][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = la_in[(off_x >= 0) ? 0 : -off_x]; \ \ /* upper mid and lower mid apron */ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y] = ua_in[x * (BLOCK_W)]; \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = la_in[x * (BLOCK_W)]; \ } \ \ /* upper right and lower right apron */ \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y] = \ ua_in[(img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? ((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1]; \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = \ la_in[(img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? ((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1]; \ } \ \ /* left and right apron */ \ { \ IN_T *la_in = in - ((off_x >= 0) ? 0 : off_x); \ IN_T *ra_in = in + ((img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? 
((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1); \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) { \ bl_d[threadIdx.x][threadIdx.y + y * (BLOCK_H)] = la_in[y * (BLOCK_H) * img_w]; \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y + y * (BLOCK_H)] = ra_in[y * (BLOCK_H) * img_w]; \ } \ } \ \ /* main data */ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y + y * (BLOCK_H)] = in[x * (BLOCK_W) + y * (BLOCK_H) * img_w]; \ } \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int i = -(MTX_S / 2); i <= ((MTX_S - 1) / 2); i++) { \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + i + MTX_S * ((MTX_S / 2) + j)] * bl_d[threadIdx.x + x * (BLOCK_W) + i][threadIdx.y + y * (BLOCK_H) + j]; \ } \ \ out[x * (BLOCK_W) + y * (BLOCK_H) * img_w] = sum / div; \ } \ } \ } \ \ static void cu_convolve_##USR(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / ((BLOCK_W) * (STEPS_X)), img_h / ((BLOCK_H) * (STEPS_Y))); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ hipLaunchKernelGGL(( convol_kernel_##USR), dim3(blocks), dim3(threads), 0, 0, (IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w, img_h); \ } /* DECL_CU_CONVOLUTION(wavelet_65, uint8_t, float, float, 65, 32, 32, 4, 4) __constant__ float wavelet_65[65][65]; extern "C" void cu_wavelet_filter_65(int img_w, int img_h, void *gm_in, void *gm_out, const float *wave_mtx, float div) { void *mtx; hipGetSymbolAddress(&mtx, wavelet_65); hipMemcpyToSymbol(wavelet_65, wave_mtx, 65 * 65 * sizeof(float)); cu_convolve_wavelet_65(gm_in, gm_out, mtx, div, img_w, img_h); } */ DECL_CU_CONVOLUTION_ROW(sf, uint8_t, float, float, 65, 32, 4, 4) 
DECL_CU_CONVOLUTION_COL(sf, float, float, float, 65, 4, 32, 4) __constant__ float mtx_f65[65]; extern "C" void cu_convolve_row_f65(int img_w, int img_h, void *gm_in, void *gm_out, const float *mtx, float div) { void *mtx_s; hipGetSymbolAddress(&mtx_s, mtx_f65); hipMemcpyToSymbol(mtx_f65, mtx, 65 * sizeof(float)); cu_convolve_row_sf65(gm_in, gm_out, mtx_s, div, img_w, img_h); } extern "C" void cu_convolve_col_f65(int img_w, int img_h, void *gm_in, void *gm_out, const float *mtx, float div) { void *mtx_s; hipGetSymbolAddress(&mtx_s, mtx_f65); hipMemcpyToSymbol(mtx_f65, mtx, 65 * sizeof(float)); cu_convolve_col_sf65(gm_in, gm_out, mtx_s, div, img_w, img_h); }
53635923237d3529757447c25d5d143f88ae5551.cu
extern "C" { #include "convolve.h" #include <stdint.h> } // http://developer.download.nvidia.com/assets/cuda/files/convolutionSeparable.pdf /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension); note: BLOCK_W must be >= (MTX_S / 2) to fill apron * BLOCK_H: block height (block y dimension) * STEPS: number of convolution steps performed (number of output pixels written) by each thread */ #define DECL_CU_CONVOLUTION_ROW(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS) \ __global__ void convol_row_k_##USR##MTX_S(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w) \ { \ __shared__ IN_T bl_d[(BLOCK_H)][((STEPS) + 2) * (BLOCK_W)]; \ \ /* offset to left edge of apron */ \ const int off_x = (blockIdx.x * (STEPS) - 1) * (BLOCK_W) + threadIdx.x; \ const int off_y = blockIdx.y * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* left apron */ \ bl_d[threadIdx.y][threadIdx.x] = (off_x >= 0) ? in[0] : in[-off_x]; \ \ /* main data */ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) \ bl_d[threadIdx.y][threadIdx.x + i * (BLOCK_W)] = in[i * (BLOCK_W)]; \ \ /* right apron */ \ bl_d[threadIdx.y][threadIdx.x + ((STEPS) + 1) * (BLOCK_W)] = (img_w - off_x > ((STEPS) + 1) * (BLOCK_W)) ? 
\ in[((STEPS) + 1) * (BLOCK_W)] : in[img_w - off_x - 1]; \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + j] * bl_d[threadIdx.y][threadIdx.x + i * (BLOCK_W) + j]; \ \ out[i * (BLOCK_W)] = sum / div; \ } \ } \ \ static void cu_convolve_row_##USR##MTX_S(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / ((BLOCK_W) * (STEPS)), img_h / (BLOCK_H)); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ convol_row_k_##USR##MTX_S<<<blocks, threads>>>((IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w); \ } /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension) * BLOCK_H: block height (block y dimension); note: BLOCK_H must be >= (MTX_S / 2) to fill apron * STEPS: number of convolution steps performed (number of output pixels written) by each thread */ #define DECL_CU_CONVOLUTION_COL(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS) \ __global__ void convol_col_k_##USR##MTX_S(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w, int img_h) \ { \ __shared__ IN_T bl_d[(BLOCK_W)][((STEPS) + 2) * (BLOCK_H)]; /* +1 */ \ \ /* offset to upper edge of apron */ \ const int off_x = blockIdx.x * (BLOCK_W) + threadIdx.x; \ const int off_y = (blockIdx.y * (STEPS) - 1) * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* upper apron */ \ bl_d[threadIdx.x][threadIdx.y] = (off_y >= 0) ? 
in[0] : in[-off_y * img_w]; \ \ /* main data */ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) \ bl_d[threadIdx.x][threadIdx.y + i * (BLOCK_H)] = in[i * (BLOCK_H) * img_w]; \ \ /* lower apron */ \ bl_d[threadIdx.x][threadIdx.y + ((STEPS) + 1) * (BLOCK_H)] = (img_h - off_y > ((STEPS) + 1) * (BLOCK_H)) ? \ in[((STEPS) + 1) * (BLOCK_H) * img_w] : in[(img_h - off_y - 1) * img_w]; \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int i = 1; i <= (STEPS); i++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + j] * bl_d[threadIdx.x][threadIdx.y + i * (BLOCK_H) + j]; \ \ out[i * (BLOCK_H) * img_w] = sum / div; \ } \ } \ \ static void cu_convolve_col_##USR##MTX_S(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / (BLOCK_W), img_h / ((BLOCK_H) * (STEPS))); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ convol_col_k_##USR##MTX_S<<<blocks, threads>>>((IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w, img_h);\ } #define WARP_SIZE 32 DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 3, 1, (WARP_SIZE / 1), 8) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 3, (WARP_SIZE / 1), 1, 8) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 5, 2, (WARP_SIZE / 2), 8) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 5, (WARP_SIZE / 2), 2, 8) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 7, 4, (WARP_SIZE / 4), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 7, (WARP_SIZE / 4), 4, 4) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 9, 4, (WARP_SIZE / 4), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 9, (WARP_SIZE / 4), 4, 4) DECL_CU_CONVOLUTION_ROW(gauss, uint8_t, uint8_t, int, 11, 8, (WARP_SIZE / 8), 4) DECL_CU_CONVOLUTION_COL(gauss, uint8_t, uint8_t, int, 11, (WARP_SIZE / 8), 8, 4) __constant__ int gauss_mtx_3[3] = { 1, 2, 1 }; __constant__ int gauss_mtx_5[5] = { 1, 4, 6, 4, 1 }; __constant__ int 
gauss_mtx_7[7] = { 1, 6, 15, 20, 15, 6, 1 }; __constant__ int gauss_mtx_9[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 }; __constant__ int gauss_mtx_11[11] = { 1, 10, 45, 120, 210, 252, 210, 120, 45, 10, 1 }; extern "C" int cu_gauss_filter(int rad, int img_w, int img_h, void *gm_in, void *gm_out, void *gm_tmp) { void *mtx; switch (rad) { case 3: cudaGetSymbolAddress(&mtx, gauss_mtx_3); cu_convolve_row_gauss3(gm_in, gm_tmp, mtx, 4, img_w, img_h); cu_convolve_col_gauss3(gm_tmp, gm_out, mtx, 4, img_w, img_h); break; case 5: cudaGetSymbolAddress(&mtx, gauss_mtx_5); cu_convolve_row_gauss5(gm_in, gm_tmp, mtx, 16, img_w, img_h); cu_convolve_col_gauss5(gm_tmp, gm_out, mtx, 16, img_w, img_h); break; case 7: cudaGetSymbolAddress(&mtx, gauss_mtx_7); cu_convolve_row_gauss7(gm_in, gm_tmp, mtx, 64, img_w, img_h); cu_convolve_col_gauss7(gm_tmp, gm_out, mtx, 64, img_w, img_h); break; case 9: cudaGetSymbolAddress(&mtx, gauss_mtx_9); cu_convolve_row_gauss9(gm_in, gm_tmp, mtx, 256, img_w, img_h); cu_convolve_col_gauss9(gm_tmp, gm_out, mtx, 256, img_w, img_h); break; case 11: cudaGetSymbolAddress(&mtx, gauss_mtx_11); cu_convolve_row_gauss11(gm_in, gm_tmp, mtx, 1024, img_w, img_h); cu_convolve_col_gauss11(gm_tmp, gm_out, mtx, 1024, img_w, img_h); break; default: return -1; } return 0; } DECL_CU_CONVOLUTION_ROW(gaussf, float, float, float, 11, 6, (WARP_SIZE / 6), 4) DECL_CU_CONVOLUTION_COL(gaussf, float, float, float, 11, (WARP_SIZE / 6), 6, 4) __constant__ float gauss_mtx_f11[11] = { 1.0, 10.0, 45.0, 120.0, 210.0, 252.0, 210.0, 120.0, 45.0, 10.0, 1.0 }; extern "C" void cu_gauss_filter_f11(int img_w, int img_h, void *gm_in, void *gm_out, void *gm_tmp) { void *mtx; cudaGetSymbolAddress(&mtx, gauss_mtx_f11); cu_convolve_row_gaussf11(gm_in, gm_tmp, mtx, 1024.0, img_w, img_h); cu_convolve_col_gaussf11(gm_tmp, gm_out, mtx, 1024.0, img_w, img_h); } DECL_CU_CONVOLUTION_ROW(sobel, uint8_t, int16_t, int, 3, 1, (WARP_SIZE / 1), 8); DECL_CU_CONVOLUTION_COL(sobel, int16_t, int16_t, int, 3, (WARP_SIZE / 
1), 1, 8); __constant__ int sobel_mtx_1[3] = { -1, 0, 1 }; __constant__ int sobel_mtx_2[3] = { 1, 2, 1 }; extern "C" int cu_sobel_filter(int img_w, int img_h, void *gm_in, void *gm_hori, void *gm_vert, void *gm_tmp) { void *mtx1, *mtx2; cudaGetSymbolAddress(&mtx1, sobel_mtx_1); cudaGetSymbolAddress(&mtx2, sobel_mtx_2); cu_convolve_row_sobel3(gm_in, gm_tmp, mtx1, 1, img_w, img_h); cu_convolve_col_sobel3(gm_tmp, gm_hori, mtx2, 1, img_w, img_h); cu_convolve_row_sobel3(gm_in, gm_tmp, mtx2, 1, img_w, img_h); cu_convolve_col_sobel3(gm_tmp, gm_vert, mtx1, 1, img_w, img_h); return 0; } /*! * USR: user specified identifier for this convolution * IN_T: input pixel type * OUT_T: output pixel type * MTX_T: matrix scalar type (this type is used for intermediate results!) * MTX_S: matrix size * BLOCK_W: block width (block x dimension); note: BLOCK_W must be >= (MTX_S / 2) to fill apron * BLOCK_H: block height (block y dimension); note: BLOCK_H must be >= (MTX_S / 2) to fill apron * STEPS_X: number of convolution steps performed (number of output pixels written) by each thread in x direction * STEPS_Y: number of convolution steps performed (number of output pixels written) by each thread in y direction */ #define DECL_CU_CONVOLUTION(USR, IN_T, OUT_T, MTX_T, MTX_S, BLOCK_W, BLOCK_H, STEPS_X, STEPS_Y) \ __global__ void convol_kernel_##USR(IN_T *in, OUT_T *out, MTX_T *mtx, MTX_T div, int img_w, int img_h) \ { \ __shared__ IN_T bl_d[((STEPS_X) + 2) * (BLOCK_W)][((STEPS_Y) + 2) * (BLOCK_H)]; \ \ /* offset to upper left corner of apron */ \ const int off_x = (blockIdx.x * (STEPS_X) - 1) * (BLOCK_W) + threadIdx.x; \ const int off_y = (blockIdx.y * (STEPS_Y) - 1) * (BLOCK_H) + threadIdx.y; \ \ in += off_y * img_w + off_x; \ out += off_y * img_w + off_x; \ \ /* upper and lower apron */ \ { \ IN_T *ua_in = in - img_w * ((off_y >= 0) ? 0 : off_y); \ IN_T *la_in = in + img_w * ((img_h - off_y > ((STEPS_Y) + 1) * (BLOCK_H)) ? 
((STEPS_Y) + 1) * (BLOCK_H) : (img_h - off_y - 1)); \ \ /* upper left and lower left apron */ \ bl_d[threadIdx.x][threadIdx.y] = ua_in[(off_x >= 0) ? 0 : -off_x]; \ bl_d[threadIdx.x][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = la_in[(off_x >= 0) ? 0 : -off_x]; \ \ /* upper mid and lower mid apron */ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y] = ua_in[x * (BLOCK_W)]; \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = la_in[x * (BLOCK_W)]; \ } \ \ /* upper right and lower right apron */ \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y] = \ ua_in[(img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? ((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1]; \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y + ((STEPS_Y) + 1) * (BLOCK_H)] = \ la_in[(img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? ((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1]; \ } \ \ /* left and right apron */ \ { \ IN_T *la_in = in - ((off_x >= 0) ? 0 : off_x); \ IN_T *ra_in = in + ((img_w - off_x > ((STEPS_X) + 1) * (BLOCK_W)) ? 
((STEPS_X) + 1) * (BLOCK_W) : img_w - off_x - 1); \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) { \ bl_d[threadIdx.x][threadIdx.y + y * (BLOCK_H)] = la_in[y * (BLOCK_H) * img_w]; \ bl_d[threadIdx.x + ((STEPS_X) + 1) * (BLOCK_W)][threadIdx.y + y * (BLOCK_H)] = ra_in[y * (BLOCK_H) * img_w]; \ } \ } \ \ /* main data */ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) \ bl_d[threadIdx.x + x * (BLOCK_W)][threadIdx.y + y * (BLOCK_H)] = in[x * (BLOCK_W) + y * (BLOCK_H) * img_w]; \ } \ \ __syncthreads(); \ \ _Pragma("unroll") \ for (int x = 1; x <= (STEPS_X); x++) { \ \ _Pragma("unroll") \ for (int y = 1; y <= (STEPS_Y); y++) { \ MTX_T sum = 0; \ \ _Pragma("unroll") \ for (int i = -(MTX_S / 2); i <= ((MTX_S - 1) / 2); i++) { \ \ _Pragma("unroll") \ for (int j = -(MTX_S / 2); j <= ((MTX_S - 1) / 2); j++) \ sum += mtx[(MTX_S / 2) + i + MTX_S * ((MTX_S / 2) + j)] * bl_d[threadIdx.x + x * (BLOCK_W) + i][threadIdx.y + y * (BLOCK_H) + j]; \ } \ \ out[x * (BLOCK_W) + y * (BLOCK_H) * img_w] = sum / div; \ } \ } \ } \ \ static void cu_convolve_##USR(void *gm_in, void *gm_out, void *gm_mtx, MTX_T div, int img_w, int img_h) \ { \ dim3 blocks(img_w / ((BLOCK_W) * (STEPS_X)), img_h / ((BLOCK_H) * (STEPS_Y))); \ dim3 threads((BLOCK_W), (BLOCK_H)); \ \ convol_kernel_##USR<<<blocks, threads>>>((IN_T *)gm_in, (OUT_T *)gm_out, (MTX_T *)gm_mtx, div, img_w, img_h); \ } /* DECL_CU_CONVOLUTION(wavelet_65, uint8_t, float, float, 65, 32, 32, 4, 4) __constant__ float wavelet_65[65][65]; extern "C" void cu_wavelet_filter_65(int img_w, int img_h, void *gm_in, void *gm_out, const float *wave_mtx, float div) { void *mtx; cudaGetSymbolAddress(&mtx, wavelet_65); cudaMemcpyToSymbol(wavelet_65, wave_mtx, 65 * 65 * sizeof(float)); cu_convolve_wavelet_65(gm_in, gm_out, mtx, div, img_w, img_h); } */ DECL_CU_CONVOLUTION_ROW(sf, uint8_t, float, float, 65, 32, 4, 4) DECL_CU_CONVOLUTION_COL(sf, float, float, float, 65, 4, 32, 
4) __constant__ float mtx_f65[65]; extern "C" void cu_convolve_row_f65(int img_w, int img_h, void *gm_in, void *gm_out, const float *mtx, float div) { void *mtx_s; cudaGetSymbolAddress(&mtx_s, mtx_f65); cudaMemcpyToSymbol(mtx_f65, mtx, 65 * sizeof(float)); cu_convolve_row_sf65(gm_in, gm_out, mtx_s, div, img_w, img_h); } extern "C" void cu_convolve_col_f65(int img_w, int img_h, void *gm_in, void *gm_out, const float *mtx, float div) { void *mtx_s; cudaGetSymbolAddress(&mtx_s, mtx_f65); cudaMemcpyToSymbol(mtx_f65, mtx, 65 * sizeof(float)); cu_convolve_col_sf65(gm_in, gm_out, mtx_s, div, img_w, img_h); }
a13fb80d6b8d3a53a03ed89c2d004c0b989ad209.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" #define RED_FACTOR 0.299f #define GREEN_FACTOR 0.587f #define BLUE_FACTOR 0.114f #define MAX_THREADS 1024 __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numPixels) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numPixels) { uchar4 pix = rgbaImage[idx]; greyImage[idx] = RED_FACTOR * pix.x + GREEN_FACTOR * pix.y + BLUE_FACTOR * pix.z; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched size_t numPixels = numRows * numCols; const dim3 blockSize(MAX_THREADS, 1, 1); //TODO const dim3 gridSize(numPixels / MAX_THREADS + 1, 1, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numPixels); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
a13fb80d6b8d3a53a03ed89c2d004c0b989ad209.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. 
#include "utils.h" #define RED_FACTOR 0.299f #define GREEN_FACTOR 0.587f #define BLUE_FACTOR 0.114f #define MAX_THREADS 1024 __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numPixels) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numPixels) { uchar4 pix = rgbaImage[idx]; greyImage[idx] = RED_FACTOR * pix.x + GREEN_FACTOR * pix.y + BLUE_FACTOR * pix.z; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched size_t numPixels = numRows * numCols; const dim3 blockSize(MAX_THREADS, 1, 1); //TODO const dim3 gridSize(numPixels / MAX_THREADS + 1, 1, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numPixels); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
89ea4b0104ab3352ebb6b103e4b13b177c09dfc8.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorSort.cuh" #include "THHTensor.hpp" #include "../generic/THCTensorSort.cu" #include <THH/THHGenerateCharType.h>
89ea4b0104ab3352ebb6b103e4b13b177c09dfc8.cu
#include "../THCTensorSort.cuh" #include "THCTensor.hpp" #include "../generic/THCTensorSort.cu" #include <THC/THCGenerateCharType.h>
064722069e4806ae71830b24888ab57dfcc6b3c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* include "MCMPC.cuh" */ #include<stdio.h> #include "../include/MCMPC.cuh" __global__ void setup_kernel(hiprandState_t *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(seed, id, 0, &state[id]); } unsigned int countBlocks(unsigned int a, unsigned int b) { unsigned int num; num = a / b; if (a < b || a % b > 0) num++; return num; } void weighted_mean(Data1 *h_Data, int Blocks, float *Us_host) { float total_weight = 0.0f; float temp[HORIZON] = {}; for(int i = 0; i < Blocks; i++){ if(isnan(h_Data[i].W)) { total_weight += 0.0f; }else{ total_weight += h_Data[i].W; } } for(int i = 0; i < HORIZON; i++) { for(int k = 0; k < Blocks; k++) { if(isnan(h_Data[k].W)) { temp[i] += 0.0f; }else{ temp[i] += h_Data[k].W * h_Data[k].Input[i] / total_weight; } if(isnan(temp[i])) { Us_host[i] = 0.0f; }else{ Us_host[i] = temp[i]; } } } } __device__ float generate_u(int t, float mean, float var, float *d_cov, float *z) { int count_index; count_index = t * HORIZON; float ret, sec_term; sec_term = 0; for(int k = 0; k < HORIZON; k++) { sec_term += d_cov[count_index+k]*z[k]; /*if(t == 0 && k == 0){ sec_term += d_cov[t]*z[k]; }else{ sec_term += d_cov[t + k*HORIZON -1]*z[k]; }*/ } ret = mean + var * sec_term; return ret; } __device__ float gen_u(unsigned int id, hiprandState_t *state, float ave, float vr) { float u; hiprandState_t localState = state[id]; u = hiprand_normal(&localState) * vr + ave; return u; } __global__ void setup_init_Covariance(float *Mat) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; //float values; /*if(threadIdx.x == 0 && blockIdx.x ==0) { values[threadIdx.x] = 1.0f; }*/ if(threadIdx.x == blockIdx.x) { Mat[id] = 1.0f; //values[threadIdx.x] = 1.0f; }else{ Mat[id] = 0.0f; //values[threadIdx.x] = 0.0f; } __syncthreads(); /*if(threadIdx.x == 0) { for(int i =0; i < 
blockDim.x; i++) Mat[id] = values[i]; } */ } __global__ void MCMPC_GPU_Linear_Example(float x, float y, float w, hiprandState_t *devs, Data1 *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_matrix) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; float get_state[dim_state] = {}; float z[HORIZON] = { }; for(int t = 0; t < HORIZON; t++) { //block_var = var; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += HORIZON; } u[t] = generate_u(t, d_Datas[0].Input[t] /**/, var, d_cov, z); // if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } //u[t] = z[t]; /*if(u[t]<-4.0f){ u[t] = -4.0f; } if(u[t] > 4.0f){ u[t] = 4.0f; }*/ //printf("hoge = %d id=%d @ %f %f\n",t, id, u[t], z[t]); //calc_Linear_example(d_state_here, u[t], d_param, get_state); get_state[0] = d_param[0]*x + d_param[1]*y + d_param[2]*w + d_param[9]*u[t]; get_state[1] = d_param[3]*x + d_param[4]*y + d_param[5]*w + d_param[10]*u[t]; get_state[2] = d_param[6]*x + d_param[7]*y + d_param[8]*w + d_param[11]*u[t]; x = get_state[0]; y = get_state[1]; w = get_state[2]; //printf("hoge id=%d @ %f %f %f\n", id, u[t], d_param[0], get_state[1]); //qx += d_matrix[0] * get_state[0] * get_state[0] + d_matrix[4] * get_state[1] * get_state[1] +d_matrix[5] * u[t] * u[t]; qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t]; //qx += d_matrix[1] * get_state[0] * get_state[1]; //qx += d_matrix[3] * get_state[0] * get_state[1]; //qx += d_matrix[4] * get_state[1] * get_state[1]; //qx += d_matrix[5] * u[t] * u[t]; /*for(int h = 0; h < dim_state; h++){ d_state_here[h] = get_state[h]; }*/ total_cost += qx; qx = 0.0f; } float KL_COST, S, lambda; lambda = HORIZON * dim_state; S = total_cost 
/ lambda; KL_COST = exp(-S); W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] = total_cost; __syncthreads(); if(threadIdx.x == 0) { best_thread_id_this_block = 0; for(int y = 1; y < blockDim.x; y++){ if(L_comp[y] < L_comp[best_thread_id_this_block]) { best_thread_id_this_block = y; } } } __syncthreads(); if(threadIdx.x == best_thread_id_this_block) { Data1 block_best; block_best.L = L_comp[best_thread_id_this_block]; block_best.W = W_comp[best_thread_id_this_block]; for(int z = 0; z < HORIZON; z++) { block_best.Input[z] = u[z]; } d_Datas[blockIdx.x] = block_best; } } __global__ void shift_Input_vec(Input_vec *dst, float *dev_Us) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int k = 0; k < HORIZON - 1; k++){ dst[id].Input[k] = dev_Us[k+1]; } dst[id].Input[HORIZON-1] = dev_Us[HORIZON - 1]; __syncthreads(); } #ifdef USING_THRUST __global__ void Using_Thrust_MCMPC_Linear(float x, float y, float w, hiprandState_t *devs,Input_vec *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_matrix, float *cost_vec){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; float get_state[dim_state] = {}; float z[HORIZON] = { }; cost_vec[id] = 0.0f; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += N_OF_SAMPLES; __syncthreads(); } for(int t = 0; t < HORIZON; t++) { //block_var = var; __syncthreads(); //printf("id == %d -> z[%d]==%f\n",id, t, z[t]); u[t] = generate_u(t, d_Datas[0].Input[t] /**/, var, d_cov, z); // if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } get_state[0] = d_param[0]*x + d_param[1]*y + d_param[2]*w + d_param[9]*u[t]; get_state[1] = d_param[3]*x + d_param[4]*y + d_param[5]*w + d_param[10]*u[t]; get_state[2] = d_param[6]*x + d_param[7]*y + 
d_param[8]*w + d_param[11]*u[t]; x = get_state[0]; y = get_state[1]; w = get_state[2]; qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t]; total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 100000; } float KL_COST, S, lambda; lambda = HORIZON * dim_state; //lambda = 10.0f; S = total_cost / lambda; KL_COST = exp(-S); /*W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] = total_cost;*/ __syncthreads(); d_Datas[id].W = KL_COST; //d_Datas[id].L = total_cost; for(int index = 0; index < HORIZON; index++) { d_Datas[id].Input[index] = u[index]; d_Datas[id].dy[index] = z[index]; } cost_vec[id] = total_cost; __syncthreads(); } __global__ void Using_Thrust_MCMPC_Pendulum(float x, float th, float dx, float dth, hiprandState_t *devs, Input_vec *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_const, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; //float get_state[dim_state] = {}; float z[HORIZON] = { }; cost_vec[id] = 0.0f; float ddx, ddtheta; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += HORIZON; } __syncthreads(); for(int t = 0; t < HORIZON; t++) { //block_var = var; //__syncthreads(); //printf("id == %d -> z[%d]==%f\n",id, t, z[t]); u[t] = generate_u(t, d_Datas[0].Input[t] /**/, var, d_cov, z); // if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } if(u[t] < d_const[0]){ u[t] = d_const[0]; } if(u[t] > d_const[1]){ u[t] = d_const[1]; } ddx = Cart_type_Pendulum_ddx(u[t], x, th, dx, dth, d_param); ddtheta = Cart_type_Pendulum_ddtheta(u[t], x, th, dx, dth, d_param); dx = dx + (ddx * interval); dth = dth + (ddtheta * interval); x = x + (dx * interval); th = th + (dth * 
interval); while (th > M_PI) th -= (2 * M_PI); while (th < -M_PI) th += (2 * M_PI); /*if(id == 1000 || id == 1001){ printf("id = %d :: u[%d] = %f x = %f th = %f\n", id, t, u[t], x, th); }*/ qx = x * x * d_matrix[0] + th * th * d_matrix[1] + dx * dx * d_matrix[2] + dth * dth * d_matrix[3] + d_matrix[4] * u[t] * u[t]; /*qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t];*/ if( x <= 0){ qx += 1 / pow(9.0*(x - d_const[2]),2); if(x < d_const[2]){ qx += 10000000; } }else{ qx += 1 / pow(9.0*(d_const[3] - x),2); if(x > d_const[3]){ qx += 10000000; } } total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 100000; } float KL_COST, S, lambda; // lambda = HORIZON * dim_state; lambda = 4 * HORIZON; //lambda = 10.0f; S = total_cost / lambda; KL_COST = exp(-S); /*W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] = total_cost;*/ __syncthreads(); d_Datas[id].W = KL_COST; d_Datas[id].L = total_cost; //d_Datas[id].L = total_cost; for(int index = 0; index < HORIZON; index++) { d_Datas[id].Input[index] = u[index]; d_Datas[id].dy[index] = z[index]; } cost_vec[id] = total_cost; __syncthreads(); } __global__ void set_Input_vec(Input_vec *d_Input_vec, float init) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int i=0; i < HORIZON; i++){ d_Input_vec[id].Input[i] = init; } __syncthreads(); } __global__ void callback_elite_sample(Data1 *d_Datas, Input_vec *dst, int *elite_indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; d_Datas[id].W = dst[elite_indices[id]].W; d_Datas[id].L = dst[elite_indices[id]].L; for(int i = 0; i < HORIZON; i++){ d_Datas[id].Input[i] = dst[elite_indices[id]].Input[i]; d_Datas[id].dy[i] = dst[elite_indices[id]].dy[i]; } } __global__ void reset_Input_vec(Input_vec *d_Input_vec, float *opt){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int i = 0; i < HORIZON; i++){ d_Input_vec[id].Input[i] = opt[i]; } } #endif
064722069e4806ae71830b24888ab57dfcc6b3c5.cu
/* include "MCMPC.cuh" */ #include<stdio.h> #include "../include/MCMPC.cuh" __global__ void setup_kernel(curandState *state,int seed) { unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(seed, id, 0, &state[id]); } unsigned int countBlocks(unsigned int a, unsigned int b) { unsigned int num; num = a / b; if (a < b || a % b > 0) num++; return num; } void weighted_mean(Data1 *h_Data, int Blocks, float *Us_host) { float total_weight = 0.0f; float temp[HORIZON] = {}; for(int i = 0; i < Blocks; i++){ if(isnan(h_Data[i].W)) { total_weight += 0.0f; }else{ total_weight += h_Data[i].W; } } for(int i = 0; i < HORIZON; i++) { for(int k = 0; k < Blocks; k++) { if(isnan(h_Data[k].W)) { temp[i] += 0.0f; }else{ temp[i] += h_Data[k].W * h_Data[k].Input[i] / total_weight; } if(isnan(temp[i])) { Us_host[i] = 0.0f; }else{ Us_host[i] = temp[i]; } } } } __device__ float generate_u(int t, float mean, float var, float *d_cov, float *z) { int count_index; count_index = t * HORIZON; float ret, sec_term; sec_term = 0; for(int k = 0; k < HORIZON; k++) { sec_term += d_cov[count_index+k]*z[k]; /*if(t == 0 && k == 0){ sec_term += d_cov[t]*z[k]; }else{ sec_term += d_cov[t + k*HORIZON -1]*z[k]; }*/ } ret = mean + var * sec_term; return ret; } __device__ float gen_u(unsigned int id, curandState *state, float ave, float vr) { float u; curandState localState = state[id]; u = curand_normal(&localState) * vr + ave; return u; } __global__ void setup_init_Covariance(float *Mat) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; //float values; /*if(threadIdx.x == 0 && blockIdx.x ==0) { values[threadIdx.x] = 1.0f; }*/ if(threadIdx.x == blockIdx.x) { Mat[id] = 1.0f; //values[threadIdx.x] = 1.0f; }else{ Mat[id] = 0.0f; //values[threadIdx.x] = 0.0f; } __syncthreads(); /*if(threadIdx.x == 0) { for(int i =0; i < blockDim.x; i++) Mat[id] = values[i]; } */ } __global__ void MCMPC_GPU_Linear_Example(float x, float 
y, float w, curandState *devs, Data1 *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_matrix) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; float get_state[dim_state] = {}; float z[HORIZON] = { }; for(int t = 0; t < HORIZON; t++) { //block_var = var; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += HORIZON; } u[t] = generate_u(t, d_Datas[0].Input[t] /*ここが影響している可能性*/, var, d_cov, z); //ここが影響している可能性 if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } //u[t] = z[t]; /*if(u[t]<-4.0f){ u[t] = -4.0f; } if(u[t] > 4.0f){ u[t] = 4.0f; }*/ //printf("hoge = %d id=%d @ %f %f\n",t, id, u[t], z[t]); //calc_Linear_example(d_state_here, u[t], d_param, get_state); get_state[0] = d_param[0]*x + d_param[1]*y + d_param[2]*w + d_param[9]*u[t]; get_state[1] = d_param[3]*x + d_param[4]*y + d_param[5]*w + d_param[10]*u[t]; get_state[2] = d_param[6]*x + d_param[7]*y + d_param[8]*w + d_param[11]*u[t]; x = get_state[0]; y = get_state[1]; w = get_state[2]; //printf("hoge id=%d @ %f %f %f\n", id, u[t], d_param[0], get_state[1]); //qx += d_matrix[0] * get_state[0] * get_state[0] + d_matrix[4] * get_state[1] * get_state[1] +d_matrix[5] * u[t] * u[t]; qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t]; //qx += d_matrix[1] * get_state[0] * get_state[1]; //qx += d_matrix[3] * get_state[0] * get_state[1]; //qx += d_matrix[4] * get_state[1] * get_state[1]; //qx += d_matrix[5] * u[t] * u[t]; /*for(int h = 0; h < dim_state; h++){ d_state_here[h] = get_state[h]; }*/ total_cost += qx; qx = 0.0f; } float KL_COST, S, lambda; lambda = HORIZON * dim_state; S = total_cost / lambda; KL_COST = exp(-S); W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] 
= total_cost; __syncthreads(); if(threadIdx.x == 0) { best_thread_id_this_block = 0; for(int y = 1; y < blockDim.x; y++){ if(L_comp[y] < L_comp[best_thread_id_this_block]) { best_thread_id_this_block = y; } } } __syncthreads(); if(threadIdx.x == best_thread_id_this_block) { Data1 block_best; block_best.L = L_comp[best_thread_id_this_block]; block_best.W = W_comp[best_thread_id_this_block]; for(int z = 0; z < HORIZON; z++) { block_best.Input[z] = u[z]; } d_Datas[blockIdx.x] = block_best; } } __global__ void shift_Input_vec(Input_vec *dst, float *dev_Us) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int k = 0; k < HORIZON - 1; k++){ dst[id].Input[k] = dev_Us[k+1]; } dst[id].Input[HORIZON-1] = dev_Us[HORIZON - 1]; __syncthreads(); } #ifdef USING_THRUST __global__ void Using_Thrust_MCMPC_Linear(float x, float y, float w, curandState *devs,Input_vec *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_matrix, float *cost_vec){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; float get_state[dim_state] = {}; float z[HORIZON] = { }; cost_vec[id] = 0.0f; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += N_OF_SAMPLES; __syncthreads(); } for(int t = 0; t < HORIZON; t++) { //block_var = var; __syncthreads(); //printf("id == %d -> z[%d]==%f\n",id, t, z[t]); u[t] = generate_u(t, d_Datas[0].Input[t] /*ここが影響している可能性*/, var, d_cov, z); //ここが影響している可能性 if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } get_state[0] = d_param[0]*x + d_param[1]*y + d_param[2]*w + d_param[9]*u[t]; get_state[1] = d_param[3]*x + d_param[4]*y + d_param[5]*w + d_param[10]*u[t]; get_state[2] = d_param[6]*x + d_param[7]*y + d_param[8]*w + d_param[11]*u[t]; x = get_state[0]; y = 
get_state[1]; w = get_state[2]; qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t]; total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 100000; } float KL_COST, S, lambda; lambda = HORIZON * dim_state; //lambda = 10.0f; S = total_cost / lambda; KL_COST = exp(-S); /*W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] = total_cost;*/ __syncthreads(); d_Datas[id].W = KL_COST; //d_Datas[id].L = total_cost; for(int index = 0; index < HORIZON; index++) { d_Datas[id].Input[index] = u[index]; d_Datas[id].dy[index] = z[index]; } cost_vec[id] = total_cost; __syncthreads(); } __global__ void Using_Thrust_MCMPC_Pendulum(float x, float th, float dx, float dth, curandState *devs, Input_vec *d_Datas, float var, int Blocks, float *d_cov, float *d_param, float *d_const, float *d_matrix, float *cost_vec) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; unsigned int seq; seq = id; float qx = 0.0f; float total_cost = 0.0f; float u[HORIZON]= { }; //float block_var; // int Powers; //printf("hoge id=%d\n", id); //float d_state_here[dim_state] = {x,y,w}; //float get_state[dim_state] = {}; float z[HORIZON] = { }; cost_vec[id] = 0.0f; float ddx, ddtheta; for(int t_x = 0; t_x < HORIZON; t_x++) { z[t_x] = gen_u(seq, devs, 0, 1.0f); //z[t_x] = gen_u(seq, devs, d_Datas[0].Input[t_x], var); seq += HORIZON; } __syncthreads(); for(int t = 0; t < HORIZON; t++) { //block_var = var; //__syncthreads(); //printf("id == %d -> z[%d]==%f\n",id, t, z[t]); u[t] = generate_u(t, d_Datas[0].Input[t] /*ここが影響している可能性*/, var, d_cov, z); //ここが影響している可能性 if(isnan(u[t])){ u[t] = d_Datas[0].Input[t]; } if(u[t] < d_const[0]){ u[t] = d_const[0]; } if(u[t] > d_const[1]){ u[t] = d_const[1]; } ddx = Cart_type_Pendulum_ddx(u[t], x, th, dx, dth, d_param); ddtheta = Cart_type_Pendulum_ddtheta(u[t], x, th, dx, dth, d_param); dx = dx + (ddx * interval); dth = dth + (ddtheta * interval); x = x + (dx * interval); th = th + (dth * interval); while (th > M_PI) th -= 
(2 * M_PI); while (th < -M_PI) th += (2 * M_PI); /*if(id == 1000 || id == 1001){ printf("id = %d :: u[%d] = %f x = %f th = %f\n", id, t, u[t], x, th); }*/ qx = x * x * d_matrix[0] + th * th * d_matrix[1] + dx * dx * d_matrix[2] + dth * dth * d_matrix[3] + d_matrix[4] * u[t] * u[t]; /*qx = x * x * d_matrix[0] + y * y * d_matrix[1] + w * w * d_matrix[2] + d_matrix[3]*u[t]*u[t];*/ if( x <= 0){ qx += 1 / pow(9.0*(x - d_const[2]),2); if(x < d_const[2]){ qx += 10000000; } }else{ qx += 1 / pow(9.0*(d_const[3] - x),2); if(x > d_const[3]){ qx += 10000000; } } total_cost += qx; qx = 0.0f; } if(isnan(total_cost)) { total_cost = 100000; } float KL_COST, S, lambda; // lambda = HORIZON * dim_state; lambda = 4 * HORIZON; //lambda = 10.0f; S = total_cost / lambda; KL_COST = exp(-S); /*W_comp[threadIdx.x] = KL_COST; L_comp[threadIdx.x] = total_cost;*/ __syncthreads(); d_Datas[id].W = KL_COST; d_Datas[id].L = total_cost; //d_Datas[id].L = total_cost; for(int index = 0; index < HORIZON; index++) { d_Datas[id].Input[index] = u[index]; d_Datas[id].dy[index] = z[index]; } cost_vec[id] = total_cost; __syncthreads(); } __global__ void set_Input_vec(Input_vec *d_Input_vec, float init) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int i=0; i < HORIZON; i++){ d_Input_vec[id].Input[i] = init; } __syncthreads(); } __global__ void callback_elite_sample(Data1 *d_Datas, Input_vec *dst, int *elite_indices) { unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; d_Datas[id].W = dst[elite_indices[id]].W; d_Datas[id].L = dst[elite_indices[id]].L; for(int i = 0; i < HORIZON; i++){ d_Datas[id].Input[i] = dst[elite_indices[id]].Input[i]; d_Datas[id].dy[i] = dst[elite_indices[id]].dy[i]; } } __global__ void reset_Input_vec(Input_vec *d_Input_vec, float *opt){ unsigned int id = threadIdx.x + blockDim.x * blockIdx.x; for(int i = 0; i < HORIZON; i++){ d_Input_vec[id].Input[i] = opt[i]; } } #endif
41b477a11d6569fd6e7a2453de94c9c03c74da56.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void complement(int *A, int *B){ int i = blockIdx.x, j = threadIdx.x, m = gridDim.x, n = blockDim.x,k=1,temp,t=0,rev; if(i!=0 && i!=m-1 && j!=0 && j!=n-1){ temp = A[i*n+j]; do{ t = t*10 + !(temp%2); temp /= 2; }while(temp>0); do{ temp = t%10; rev = rev*10 + temp; t /= 10; }while(t>0); B[i*n+j] = rev; } else B[i*n+j] = A[i*n+j]; } int main(){ int *a,*t,m,n,i,j,*da,*dt; printf("Enter m: "); scanf("%d",&m); printf("Enter n: "); scanf("%d",&n); int size = sizeof(int)*m*n; a = (int *)malloc(size); t = (int *)malloc(size); printf("Enter the matrix:\n"); for(i=0;i<m*n;i++) scanf("%d",&a[i]); hipMalloc((void **)&da,size); hipMalloc((void **)&dt,size); hipMemcpy(da,a,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( complement), dim3(m),dim3(n), 0, 0, da,dt); hipMemcpy(t,dt,size,hipMemcpyDeviceToHost); printf("Result:\n"); for(i=0;i<m;i++){ for(j=0;j<n;j++) printf("%d ",t[i*n+j]); printf("\n"); } hipFree(da); hipFree(dt); return 0; }
41b477a11d6569fd6e7a2453de94c9c03c74da56.cu
#include <stdio.h> #include <stdlib.h> __global__ void complement(int *A, int *B){ int i = blockIdx.x, j = threadIdx.x, m = gridDim.x, n = blockDim.x,k=1,temp,t=0,rev; if(i!=0 && i!=m-1 && j!=0 && j!=n-1){ temp = A[i*n+j]; do{ t = t*10 + !(temp%2); temp /= 2; }while(temp>0); do{ temp = t%10; rev = rev*10 + temp; t /= 10; }while(t>0); B[i*n+j] = rev; } else B[i*n+j] = A[i*n+j]; } int main(){ int *a,*t,m,n,i,j,*da,*dt; printf("Enter m: "); scanf("%d",&m); printf("Enter n: "); scanf("%d",&n); int size = sizeof(int)*m*n; a = (int *)malloc(size); t = (int *)malloc(size); printf("Enter the matrix:\n"); for(i=0;i<m*n;i++) scanf("%d",&a[i]); cudaMalloc((void **)&da,size); cudaMalloc((void **)&dt,size); cudaMemcpy(da,a,size,cudaMemcpyHostToDevice); complement<<<m,n>>>(da,dt); cudaMemcpy(t,dt,size,cudaMemcpyDeviceToHost); printf("Result:\n"); for(i=0;i<m;i++){ for(j=0;j<n;j++) printf("%d ",t[i*n+j]); printf("\n"); } cudaFree(da); cudaFree(dt); return 0; }
389d3696525f448c9a5bbbf9ecd0fcc24c12ca68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" /******************************************************************************/ static __global__ void stepinit_ipiv_kernel(magma_int_t **ipiv_array, int pm) { magma_int_t *ipiv = ipiv_array[blockIdx.x]; int tx = threadIdx.x; #if 0 // best case senario piv = i ==> no piv // set piv equal to myself piv[i]=i if (tx < pm) { ipiv[tx] = tx+1; } #else //set piv from the last to the first shifted by 32 such a way that it simulate the worst case if (tx < pm) { int i, s; i = pm/32; i = (i == 1 ? 0 : i); s = tx%i; ipiv[tx] = ( (pm - (s*32) ) - tx/i); //printf("voici s %d pm %d me %d ipiv %d \n",s, pm, tx, ipiv[tx]); } #endif } /******************************************************************************/ extern "C" void stepinit_ipiv(magma_int_t **ipiv_array, magma_int_t pm, magma_int_t batchCount, magma_queue_t queue) { hipLaunchKernelGGL(( stepinit_ipiv_kernel) , dim3(batchCount), dim3(pm), 0, queue->cuda_stream() , ipiv_array, pm); } /******************************************************************************/ static __global__ void magma_iset_pointer_kernel( magma_int_t **output_array, magma_int_t *input, int lda, int row, int column, int batchSize) { output_array[blockIdx.x] = input + blockIdx.x * batchSize + row + column * lda; } /******************************************************************************/ extern "C" void magma_iset_pointer( magma_int_t **output_array, magma_int_t *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchSize, magma_int_t batchCount, magma_queue_t queue) { /* convert consecutive stored variable to array stored for example the size of A is N*batchCount; N is the size of A(batchSize) change into A_array[0] 
A_array[1],... A_array[batchCount-1], where the size of each A_array[i] is N */ hipLaunchKernelGGL(( magma_iset_pointer_kernel) , dim3(batchCount), dim3(1), 0, queue->cuda_stream() , output_array, input, lda, row, column, batchSize); } /******************************************************************************/ __global__ void idisplace_pointers_kernel(magma_int_t **output_array, magma_int_t **input_array, magma_int_t lda, magma_int_t row, magma_int_t column) { magma_int_t *inpt = input_array[blockIdx.x]; output_array[blockIdx.x] = &inpt[row + column * lda]; //printf("==> zdisplace_pointer_kernel input %p input_array %p output_array %p \n",inpt, input_array[blockIdx.x],output_array[blockIdx.x]); } /******************************************************************************/ extern "C" void magma_idisplace_pointers(magma_int_t **output_array, magma_int_t **input_array, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda * column; */ hipLaunchKernelGGL(( idisplace_pointers_kernel) , dim3(batchCount), dim3(1), 0, queue->cuda_stream() , output_array, input_array, lda, row, column); }
389d3696525f448c9a5bbbf9ecd0fcc24c12ca68.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @author Azzam Haidar @author Tingxing Dong */ #include "magma_internal.h" /******************************************************************************/ static __global__ void stepinit_ipiv_kernel(magma_int_t **ipiv_array, int pm) { magma_int_t *ipiv = ipiv_array[blockIdx.x]; int tx = threadIdx.x; #if 0 // best case senario piv = i ==> no piv // set piv equal to myself piv[i]=i if (tx < pm) { ipiv[tx] = tx+1; } #else //set piv from the last to the first shifted by 32 such a way that it simulate the worst case if (tx < pm) { int i, s; i = pm/32; i = (i == 1 ? 0 : i); s = tx%i; ipiv[tx] = ( (pm - (s*32) ) - tx/i); //printf("voici s %d pm %d me %d ipiv %d \n",s, pm, tx, ipiv[tx]); } #endif } /******************************************************************************/ extern "C" void stepinit_ipiv(magma_int_t **ipiv_array, magma_int_t pm, magma_int_t batchCount, magma_queue_t queue) { stepinit_ipiv_kernel <<< batchCount, pm, 0, queue->cuda_stream() >>> (ipiv_array, pm); } /******************************************************************************/ static __global__ void magma_iset_pointer_kernel( magma_int_t **output_array, magma_int_t *input, int lda, int row, int column, int batchSize) { output_array[blockIdx.x] = input + blockIdx.x * batchSize + row + column * lda; } /******************************************************************************/ extern "C" void magma_iset_pointer( magma_int_t **output_array, magma_int_t *input, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchSize, magma_int_t batchCount, magma_queue_t queue) { /* convert consecutive stored variable to array stored for example the size of A is N*batchCount; N is the size of A(batchSize) change into A_array[0] A_array[1],... 
A_array[batchCount-1], where the size of each A_array[i] is N */ magma_iset_pointer_kernel <<< batchCount, 1, 0, queue->cuda_stream() >>> (output_array, input, lda, row, column, batchSize); } /******************************************************************************/ __global__ void idisplace_pointers_kernel(magma_int_t **output_array, magma_int_t **input_array, magma_int_t lda, magma_int_t row, magma_int_t column) { magma_int_t *inpt = input_array[blockIdx.x]; output_array[blockIdx.x] = &inpt[row + column * lda]; //printf("==> zdisplace_pointer_kernel input %p input_array %p output_array %p \n",inpt, input_array[blockIdx.x],output_array[blockIdx.x]); } /******************************************************************************/ extern "C" void magma_idisplace_pointers(magma_int_t **output_array, magma_int_t **input_array, magma_int_t lda, magma_int_t row, magma_int_t column, magma_int_t batchCount, magma_queue_t queue) { /* compute the offset for all the matrices and save the displacment of the new pointer on output_array. input_array contains the pointers to the initial position. output_array[i] = input_array[i] + row + lda * column; */ idisplace_pointers_kernel <<< batchCount, 1, 0, queue->cuda_stream() >>> (output_array, input_array, lda, row, column); }
d2669fb26ce8ef0fbe056b14a9d98fa3b5e83631.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <infiniband/verbs.h> #include <sys/types.h> #include <sys/socket.h> #include <string.h> #include <assert.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include "common.h" ///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #define TCP_PORT_OFFSET 23456 #define TCP_PORT_RANGE 1000 #define CUDA_CHECK(f) do { \ hipError_t e = f; \ if (e != hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \ exit(1); \ } \ } while (0) __device__ int arr_min(int arr[], int arr_size) { int tid = threadIdx.x; int rhs, lhs; for (int stride = 1; stride < arr_size; stride *= 2) { if (tid >= stride && tid < arr_size) { rhs = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { lhs = arr[tid]; if (rhs != 0) { if (lhs == 0) arr[tid] = rhs; else arr[tid] = min(arr[tid], rhs); } } __syncthreads(); } int ret = arr[arr_size - 1]; return ret; } __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __global__ void gpu_process_image(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = 
(uchar)map_value; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { out[i] = map[in[i]]; } return; } unsigned int getTBlocksAmnt(int threadsPerBlock, int shmemPerBlock) { struct hipDeviceProp_t props; CUDA_CHECK( hipGetDeviceProperties(&props, 0) ); int ThreadsPerSM = min(props.maxThreadsPerMultiProcessor, props.regsPerMultiprocessor/32); int SMCount = props.multiProcessorCount; size_t shmemPerSM = props.sharedMemPerMultiprocessor; return SMCount * min( ThreadsPerSM/threadsPerBlock, (unsigned int)shmemPerSM/shmemPerBlock); } __global__ void gpu_process_image_pc(volatile void* in,volatile void* out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; int bid = blockIdx.x; uchar * jobQptr; int currJobId; Q *inQ = (Q*)in, *outQ = (Q*)out; inQ += bid; outQ += bid; while (true) { __threadfence(); while(inQ->tail >= inQ->head) __threadfence_system(); // wait for queue to contain a job //save the job ptr and the job id jobQptr = inQ->jobs[inQ->tail % QSIZE].job; currJobId = inQ->jobs[inQ->tail % QSIZE].jobId; if(currJobId == DONEJOB) { __threadfence(); return; } /*----------------------------------------------------------------------------*/ /*do here the copy*/ // // //do the calcs __threadfence(); if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { __threadfence_system(); // needed to get currect image data atomicAdd(&histogram[jobQptr[i]], 1); } __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); while(!(outQ->head - outQ->tail < QSIZE)) __threadfence_system(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { outQ->jobs[outQ->head % QSIZE].job[i] = 
map[jobQptr[i]]; } outQ->jobs[outQ->head % QSIZE].jobId = currJobId; __threadfence_system(); // try to catch free cell in Qout and copy the result if (tid == 0){ //save the job-out ptr and insert the job id outQ->head ++; inQ->tail ++; // printf("GPU: sent job #%d\n",currJobId); } __threadfence_system(); } } void process_image_on_gpu(uchar *img_in, uchar *img_out) { uchar *gpu_image_in, *gpu_image_out; CUDA_CHECK(hipMalloc(&gpu_image_in, SQR(IMG_DIMENSION))); CUDA_CHECK(hipMalloc(&gpu_image_out, SQR(IMG_DIMENSION))); CUDA_CHECK(hipMemcpy(gpu_image_in, img_in, SQR(IMG_DIMENSION), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_process_image), dim3(1), dim3(1024), 0, 0, gpu_image_in, gpu_image_out); CUDA_CHECK(hipMemcpy(img_out, gpu_image_out, SQR(IMG_DIMENSION), hipMemcpyDeviceToHost)); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipFree(gpu_image_in)); CUDA_CHECK(hipFree(gpu_image_out)); } void print_usage_and_die(char *progname) { printf("usage: [port]\n"); exit(1); } struct server_context { mode_enum mode; int tcp_port; int listen_fd; /* Listening socket for TCP connection */ int socket_fd; /* Connected socket for TCP connection */ rpc_request *requests; /* Array of outstanding requests received from the network */ uchar *images_in; /* Input images for all outstanding requests */ uchar *images_out; /* Output images for all outstanding requests */ /* InfiniBand/verbs resources */ struct ibv_context *context; struct ibv_cq *cq; struct ibv_pd *pd; struct ibv_qp *qp; struct ibv_mr *mr_requests; /* Memory region for RPC requests */ struct ibv_mr *mr_images_in; /* Memory region for input images */ struct ibv_mr *mr_images_out; /* Memory region for output images */ /* TODO: add pointers and memory region(s) for CPU-GPU queues */ struct ibv_mr *mr_in_queues; /* Memory region for in queues */ struct ibv_mr *mr_out_queues; /* Memory region for out queues */ Q *QinDev,*QoutDev,*QinHost,*QoutHost; unsigned int tblocks; }; void allocate_memory(server_context *ctx) { 
CUDA_CHECK(hipHostMalloc(&ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); CUDA_CHECK(hipHostMalloc(&ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); ctx->requests = (rpc_request *)calloc(OUTSTANDING_REQUESTS, sizeof(rpc_request)); /* TODO take CPU-GPU stream allocation code from hw2 */ unsigned int tblocks = getTBlocksAmnt(1024, 2*4*256+256); ctx->tblocks = tblocks; // allocate the queues in CPU memory CUDA_CHECK( hipHostMalloc(&ctx->QinHost, sizeof(Q)*ctx->tblocks , 0) ); CUDA_CHECK( hipHostMalloc(&ctx->QoutHost, sizeof(Q)*ctx->tblocks , 0) ); // init memory to 0's memset(ctx->QinHost, 0, sizeof(Q)*ctx->tblocks); memset(ctx->QoutHost, 0, sizeof(Q)*ctx->tblocks); // get a pointer for the GPU to use CUDA_CHECK( hipHostGetDevicePointer(&ctx->QinDev, ctx->QinHost, 0) ); CUDA_CHECK( hipHostGetDevicePointer(&ctx->QoutDev, ctx->QoutHost, 0) ); } void tcp_connection(server_context *ctx) { /* setup a TCP connection for initial negotiation with client */ int lfd = socket(AF_INET, SOCK_STREAM, 0); if (lfd < 0) { perror("socket"); exit(1); } ctx->listen_fd = lfd; struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(struct sockaddr_in)); server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = INADDR_ANY; server_addr.sin_port = htons(ctx->tcp_port); if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) { perror("bind"); exit(1); } if (listen(lfd, 1)) { perror("listen"); exit(1); } printf("Server waiting on port %d. 
Client can connect\n", ctx->tcp_port); int sfd = accept(lfd, NULL, NULL); if (sfd < 0) { perror("accept"); exit(1); } printf("client connected\n"); ctx->socket_fd = sfd; } void initialize_verbs(server_context *ctx) { /* get device list */ struct ibv_device **device_list = ibv_get_device_list(NULL); if (!device_list) { printf("ERROR: ibv_get_device_list failed\n"); exit(1); } /* select first (and only) device to work with */ ctx->context = ibv_open_device(device_list[0]); /* create protection domain (PD) */ ctx->pd = ibv_alloc_pd(ctx->context); if (!ctx->pd) { printf("ERROR: ibv_alloc_pd() failed\n"); exit(1); } /* allocate a memory region for the RPC requests. */ ctx->mr_requests = ibv_reg_mr(ctx->pd, ctx->requests, sizeof(rpc_request) * OUTSTANDING_REQUESTS, IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_requests) { printf("ibv_reg_mr() failed for requests\n"); exit(1); } /* register a memory region for the input / output images. */ ctx->mr_images_in = ibv_reg_mr(ctx->pd, ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_in) { printf("ibv_reg_mr() failed for input images\n"); exit(1); } /* register a memory region for the input / output images. 
*/ ctx->mr_images_out = ibv_reg_mr(ctx->pd, ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for output images\n"); exit(1); } /* TODO register additional memory regions for CPU-GPU queues */ ctx->mr_in_queues = ibv_reg_mr(ctx->pd, ctx->QinHost, sizeof(Q)*ctx->tblocks, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ); if (!ctx->mr_in_queues) { printf("ibv_reg_mr() failed for in queue\n"); // TODO REMOVE exit(1); } ctx->mr_out_queues = ibv_reg_mr(ctx->pd, ctx->QoutHost, sizeof(Q)*ctx->tblocks, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for out queue\n"); // TODO REMOVE exit(1); } /* create completion queue (CQ). We'll use same CQ for both send and receive parts of the QP */ ctx->cq = ibv_create_cq(ctx->context, 2 * OUTSTANDING_REQUESTS, NULL, NULL, 0); /* create a CQ with place for two completions per request */ if (!ctx->cq) { printf("ERROR: ibv_create_cq() failed\n"); exit(1); } /* create QP */ struct ibv_qp_init_attr qp_init_attr; memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr)); qp_init_attr.send_cq = ctx->cq; qp_init_attr.recv_cq = ctx->cq; qp_init_attr.qp_type = IBV_QPT_RC; /* we'll use RC transport service, which supports RDMA */ qp_init_attr.cap.max_send_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in SQ per request. that's enough for us */ qp_init_attr.cap.max_recv_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in RQ per request. 
that's enough for us */ qp_init_attr.cap.max_send_sge = 1; /* 1 SGE in each send WQE */ qp_init_attr.cap.max_recv_sge = 1; /* 1 SGE in each recv WQE */ ctx->qp = ibv_create_qp(ctx->pd, &qp_init_attr); if (!ctx->qp) { printf("ERROR: ibv_create_qp() failed\n"); exit(1); } } void exchange_parameters(server_context *ctx, ib_info_t *client_info) { /* ok, before we continue we need to get info about the client' QP, and send it info about ours. * namely: QP number, and LID. * we'll use the TCP socket for that */ /* first query port for its LID (L2 address) */ int ret; struct ibv_port_attr port_attr; ret = ibv_query_port(ctx->context, IB_PORT_SERVER, &port_attr); if (ret) { printf("ERROR: ibv_query_port() failed\n"); exit(1); } /* now send our info to client */ struct ib_info_t my_info; my_info.lid = port_attr.lid; my_info.qpn = ctx->qp->qp_num; /* TODO add additional server rkeys / addresses here if needed */ my_info.tblocks = ctx->tblocks; my_info.inQaddr = ctx->mr_in_queues->addr; my_info.outQaddr = ctx->mr_out_queues->addr; my_info.rkeyIn = ctx->mr_in_queues->rkey; my_info.rkeyOut = ctx->mr_out_queues->rkey; ret = send(ctx->socket_fd, &my_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("send"); exit(1); } /* get client's info */ recv(ctx->socket_fd, client_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("recv"); exit(1); } /* we don't need TCP anymore. 
kill the socket */ close(ctx->socket_fd); close(ctx->listen_fd); ctx->socket_fd = ctx->listen_fd = 0; } /* Post a receive buffer of the given index (from the requests array) to the receive queue */ void post_recv(server_context *ctx, int index) { struct ibv_recv_wr recv_wr = {}; /* this is the receive work request (the verb's representation for receive WQE) */ ibv_sge sgl; recv_wr.wr_id = index; sgl.addr = (uintptr_t)&ctx->requests[index]; sgl.length = sizeof(ctx->requests[0]); sgl.lkey = ctx->mr_requests->lkey; recv_wr.sg_list = &sgl; recv_wr.num_sge = 1; if (ibv_post_recv(ctx->qp, &recv_wr, NULL)) { printf("ERROR: ibv_post_recv() failed\n"); exit(1); } } void connect_qp(server_context *ctx, ib_info_t *client_info) { /* this is a multi-phase process, moving the state machine of the QP step by step * until we are ready */ struct ibv_qp_attr qp_attr; /*QP state: RESET -> INIT */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = IB_PORT_SERVER; qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ; /* we'll allow client to RDMA write and read on this QP */ int ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS); if (ret) { printf("ERROR: ibv_modify_qp() to INIT failed\n"); exit(1); } /*QP: state: INIT -> RTR (Ready to Receive) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTR; qp_attr.path_mtu = IBV_MTU_4096; qp_attr.dest_qp_num = client_info->qpn; /* qp number of client */ qp_attr.rq_psn = 0 ; qp_attr.max_dest_rd_atomic = 1; /* max in-flight RDMA reads */ qp_attr.min_rnr_timer = 12; qp_attr.ah_attr.is_global = 0; /* No Network Layer (L3) */ qp_attr.ah_attr.dlid = client_info->lid; /* LID (L2 Address) of client */ qp_attr.ah_attr.sl = 0; qp_attr.ah_attr.src_path_bits = 0; qp_attr.ah_attr.port_num = IB_PORT_SERVER; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | 
IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER); if (ret) { printf("ERROR: ibv_modify_qp() to RTR failed\n"); exit(1); } /*QP: state: RTR -> RTS (Ready to Send) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTS; qp_attr.sq_psn = 0; qp_attr.timeout = 14; qp_attr.retry_cnt = 7; qp_attr.rnr_retry = 7; qp_attr.max_rd_atomic = 1; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC); if (ret) { printf("ERROR: ibv_modify_qp() to RTS failed\n"); exit(1); } /* now let's populate the receive QP with recv WQEs */ for (int i = 0; i < OUTSTANDING_REQUESTS; i++) { post_recv(ctx, i); } } void event_loop(server_context *ctx) { /* so the protocol goes like this: * 1. we'll wait for a CQE indicating that we got an Send request from the client. * this tells us we have new work to do. The wr_id we used in post_recv tells us * where the request is. * 2. now we send an RDMA Read to the client to retrieve the request. * we will get a completion indicating the read has completed. * 3. we process the request on the GPU. * 4. upon completion, we send an RDMA Write with immediate to the client with * the results. 
*/ struct ibv_send_wr send_wr; struct ibv_send_wr *bad_send_wr; rpc_request* req; uchar *img_in; uchar *img_out; ibv_sge sgl; bool terminate = false; while (!terminate) { /*step 1: poll for CQE */ struct ibv_wc wc; int ncqes; do { ncqes = ibv_poll_cq(ctx->cq, 1, &wc); } while (ncqes == 0); if (ncqes < 0) { printf("ERROR: ibv_poll_cq() failed\n"); exit(1); } if (wc.status != IBV_WC_SUCCESS) { printf("ERROR: got CQE with error '%s' (%d) (line %d)\n", ibv_wc_status_str(wc.status), wc.status, __LINE__); exit(1); } switch (wc.opcode) { case IBV_WC_RECV: /* Received a new request from the client */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; /* Terminate signal */ if (req->request_id == -1) { printf("Terminating...\n"); terminate = true; break; } if (ctx->mode != MODE_RPC_SERVER) { printf("Got client RPC request when running in queue mode.\n"); exit(1); } /* send RDMA Read to client to read the input */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; sgl.addr = (uintptr_t)img_in; sgl.length = req->input_length; sgl.lkey = ctx->mr_images_in->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_RDMA_READ; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->input_addr; send_wr.wr.rdma.rkey = req->input_rkey; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_READ: /* Completed RDMA read for a request */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; img_out = &ctx->images_out[wc.wr_id * SQR(IMG_DIMENSION)]; process_image_on_gpu(img_in, img_out); /* send RDMA Write with immediate to client with the response */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; ibv_sge sgl; sgl.addr = (uintptr_t)img_out; sgl.length = req->output_length; sgl.lkey = ctx->mr_images_out->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; 
send_wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->output_addr; send_wr.wr.rdma.rkey = req->output_rkey; send_wr.imm_data = req->request_id; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_WRITE: /* Completed RDMA Write - reuse buffers for receiving the next requests */ post_recv(ctx, wc.wr_id); break; default: printf("Unexpected completion\n"); assert(false); } } } void teardown_context(server_context *ctx) { /* cleanup */ ibv_destroy_qp(ctx->qp); ibv_destroy_cq(ctx->cq); ibv_dereg_mr(ctx->mr_requests); ibv_dereg_mr(ctx->mr_images_in); ibv_dereg_mr(ctx->mr_images_out); /* TODO destroy the additional server MRs here if needed */ ibv_dereg_mr(ctx->mr_in_queues); ibv_dereg_mr(ctx->mr_out_queues); CUDA_CHECK( hipHostFree(ctx->QinHost) ); CUDA_CHECK( hipHostFree(ctx->QoutHost) ); ibv_dealloc_pd(ctx->pd); ibv_close_device(ctx->context); } int main(int argc, char *argv[]) { server_context ctx; parse_arguments(argc, argv, &ctx.mode, &ctx.tcp_port); if (!ctx.tcp_port) { srand(time(NULL)); ctx.tcp_port = TCP_PORT_OFFSET + (rand() % TCP_PORT_RANGE); /* to avoid conflicts with other users of the machine */ } /* Initialize memory and CUDA resources */ allocate_memory(&ctx); /* Create a TCP connection with the client to exchange InfiniBand parameters */ tcp_connection(&ctx); /* now that client has connected to us via TCP we'll open up some Infiniband resources and send it the parameters */ initialize_verbs(&ctx); /* exchange InfiniBand parameters with the client */ ib_info_t client_info; exchange_parameters(&ctx, &client_info); /* now need to connect the QP to the client's QP. 
*/ connect_qp(&ctx, &client_info); if (ctx.mode == MODE_QUEUE) { /* TODO run the GPU persistent kernel from hw2, for 1024 threads per block */ unsigned int tblocks = getTBlocksAmnt(1024, 2*4*256+256); hipLaunchKernelGGL(( gpu_process_image_pc), dim3(tblocks),dim3(1024), 0, 0, ctx.QinDev,ctx.QoutDev); } /* now finally we get to the actual work, in the event loop */ /* The event loop can be used for queue mode for the termination message */ event_loop(&ctx); printf("Done\n"); teardown_context(&ctx); return 0; }
d2669fb26ce8ef0fbe056b14a9d98fa3b5e83631.cu
#include <infiniband/verbs.h> #include <sys/types.h> #include <sys/socket.h> #include <string.h> #include <assert.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include "common.h" ///////////////////////////////////////////////// DO NOT CHANGE /////////////////////////////////////// #define TCP_PORT_OFFSET 23456 #define TCP_PORT_RANGE 1000 #define CUDA_CHECK(f) do { \ cudaError_t e = f; \ if (e != cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \ exit(1); \ } \ } while (0) __device__ int arr_min(int arr[], int arr_size) { int tid = threadIdx.x; int rhs, lhs; for (int stride = 1; stride < arr_size; stride *= 2) { if (tid >= stride && tid < arr_size) { rhs = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { lhs = arr[tid]; if (rhs != 0) { if (lhs == 0) arr[tid] = rhs; else arr[tid] = min(arr[tid], rhs); } } __syncthreads(); } int ret = arr[arr_size - 1]; return ret; } __device__ void prefix_sum(int arr[], int arr_size) { int tid = threadIdx.x; int increment; for (int stride = 1; stride < min(blockDim.x, arr_size); stride *= 2) { if (tid >= stride && tid < arr_size) { increment = arr[tid - stride]; } __syncthreads(); if (tid >= stride && tid < arr_size) { arr[tid] += increment; } __syncthreads(); } } __global__ void gpu_process_image(uchar *in, uchar *out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) atomicAdd(&histogram[in[i]], 1); __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += 
blockDim.x) { out[i] = map[in[i]]; } return; } unsigned int getTBlocksAmnt(int threadsPerBlock, int shmemPerBlock) { struct cudaDeviceProp props; CUDA_CHECK( cudaGetDeviceProperties(&props, 0) ); int ThreadsPerSM = min(props.maxThreadsPerMultiProcessor, props.regsPerMultiprocessor/32); int SMCount = props.multiProcessorCount; size_t shmemPerSM = props.sharedMemPerMultiprocessor; return SMCount * min( ThreadsPerSM/threadsPerBlock, (unsigned int)shmemPerSM/shmemPerBlock); } __global__ void gpu_process_image_pc(volatile void* in,volatile void* out) { __shared__ int histogram[256]; __shared__ int hist_min[256]; int tid = threadIdx.x; int bid = blockIdx.x; uchar * jobQptr; int currJobId; Q *inQ = (Q*)in, *outQ = (Q*)out; inQ += bid; outQ += bid; while (true) { __threadfence(); while(inQ->tail >= inQ->head) __threadfence_system(); // wait for queue to contain a job //save the job ptr and the job id jobQptr = inQ->jobs[inQ->tail % QSIZE].job; currJobId = inQ->jobs[inQ->tail % QSIZE].jobId; if(currJobId == DONEJOB) { __threadfence(); return; } /*----------------------------------------------------------------------------*/ /*do here the copy*/ // // //do the calcs __threadfence(); if (tid < 256) { histogram[tid] = 0; } __syncthreads(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { __threadfence_system(); // needed to get currect image data atomicAdd(&histogram[jobQptr[i]], 1); } __syncthreads(); prefix_sum(histogram, 256); if (tid < 256) { hist_min[tid] = histogram[tid]; } __syncthreads(); int cdf_min = arr_min(hist_min, 256); __shared__ uchar map[256]; if (tid < 256) { int map_value = (float)(histogram[tid] - cdf_min) / (SQR(IMG_DIMENSION) - cdf_min) * 255; map[tid] = (uchar)map_value; } __syncthreads(); while(!(outQ->head - outQ->tail < QSIZE)) __threadfence_system(); for (int i = tid; i < SQR(IMG_DIMENSION); i += blockDim.x) { outQ->jobs[outQ->head % QSIZE].job[i] = map[jobQptr[i]]; } outQ->jobs[outQ->head % QSIZE].jobId = currJobId; 
__threadfence_system(); // try to catch free cell in Qout and copy the result if (tid == 0){ //save the job-out ptr and insert the job id outQ->head ++; inQ->tail ++; // printf("GPU: sent job #%d\n",currJobId); } __threadfence_system(); } } void process_image_on_gpu(uchar *img_in, uchar *img_out) { uchar *gpu_image_in, *gpu_image_out; CUDA_CHECK(cudaMalloc(&gpu_image_in, SQR(IMG_DIMENSION))); CUDA_CHECK(cudaMalloc(&gpu_image_out, SQR(IMG_DIMENSION))); CUDA_CHECK(cudaMemcpy(gpu_image_in, img_in, SQR(IMG_DIMENSION), cudaMemcpyHostToDevice)); gpu_process_image<<<1, 1024>>>(gpu_image_in, gpu_image_out); CUDA_CHECK(cudaMemcpy(img_out, gpu_image_out, SQR(IMG_DIMENSION), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaFree(gpu_image_in)); CUDA_CHECK(cudaFree(gpu_image_out)); } void print_usage_and_die(char *progname) { printf("usage: [port]\n"); exit(1); } struct server_context { mode_enum mode; int tcp_port; int listen_fd; /* Listening socket for TCP connection */ int socket_fd; /* Connected socket for TCP connection */ rpc_request *requests; /* Array of outstanding requests received from the network */ uchar *images_in; /* Input images for all outstanding requests */ uchar *images_out; /* Output images for all outstanding requests */ /* InfiniBand/verbs resources */ struct ibv_context *context; struct ibv_cq *cq; struct ibv_pd *pd; struct ibv_qp *qp; struct ibv_mr *mr_requests; /* Memory region for RPC requests */ struct ibv_mr *mr_images_in; /* Memory region for input images */ struct ibv_mr *mr_images_out; /* Memory region for output images */ /* TODO: add pointers and memory region(s) for CPU-GPU queues */ struct ibv_mr *mr_in_queues; /* Memory region for in queues */ struct ibv_mr *mr_out_queues; /* Memory region for out queues */ Q *QinDev,*QoutDev,*QinHost,*QoutHost; unsigned int tblocks; }; void allocate_memory(server_context *ctx) { CUDA_CHECK(cudaHostAlloc(&ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); 
CUDA_CHECK(cudaHostAlloc(&ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), 0)); ctx->requests = (rpc_request *)calloc(OUTSTANDING_REQUESTS, sizeof(rpc_request)); /* TODO take CPU-GPU stream allocation code from hw2 */ unsigned int tblocks = getTBlocksAmnt(1024, 2*4*256+256); ctx->tblocks = tblocks; // allocate the queues in CPU memory CUDA_CHECK( cudaHostAlloc(&ctx->QinHost, sizeof(Q)*ctx->tblocks , 0) ); CUDA_CHECK( cudaHostAlloc(&ctx->QoutHost, sizeof(Q)*ctx->tblocks , 0) ); // init memory to 0's memset(ctx->QinHost, 0, sizeof(Q)*ctx->tblocks); memset(ctx->QoutHost, 0, sizeof(Q)*ctx->tblocks); // get a pointer for the GPU to use CUDA_CHECK( cudaHostGetDevicePointer(&ctx->QinDev, ctx->QinHost, 0) ); CUDA_CHECK( cudaHostGetDevicePointer(&ctx->QoutDev, ctx->QoutHost, 0) ); } void tcp_connection(server_context *ctx) { /* setup a TCP connection for initial negotiation with client */ int lfd = socket(AF_INET, SOCK_STREAM, 0); if (lfd < 0) { perror("socket"); exit(1); } ctx->listen_fd = lfd; struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(struct sockaddr_in)); server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = INADDR_ANY; server_addr.sin_port = htons(ctx->tcp_port); if (bind(lfd, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_in)) < 0) { perror("bind"); exit(1); } if (listen(lfd, 1)) { perror("listen"); exit(1); } printf("Server waiting on port %d. 
Client can connect\n", ctx->tcp_port); int sfd = accept(lfd, NULL, NULL); if (sfd < 0) { perror("accept"); exit(1); } printf("client connected\n"); ctx->socket_fd = sfd; } void initialize_verbs(server_context *ctx) { /* get device list */ struct ibv_device **device_list = ibv_get_device_list(NULL); if (!device_list) { printf("ERROR: ibv_get_device_list failed\n"); exit(1); } /* select first (and only) device to work with */ ctx->context = ibv_open_device(device_list[0]); /* create protection domain (PD) */ ctx->pd = ibv_alloc_pd(ctx->context); if (!ctx->pd) { printf("ERROR: ibv_alloc_pd() failed\n"); exit(1); } /* allocate a memory region for the RPC requests. */ ctx->mr_requests = ibv_reg_mr(ctx->pd, ctx->requests, sizeof(rpc_request) * OUTSTANDING_REQUESTS, IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_requests) { printf("ibv_reg_mr() failed for requests\n"); exit(1); } /* register a memory region for the input / output images. */ ctx->mr_images_in = ibv_reg_mr(ctx->pd, ctx->images_in, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_in) { printf("ibv_reg_mr() failed for input images\n"); exit(1); } /* register a memory region for the input / output images. 
*/ ctx->mr_images_out = ibv_reg_mr(ctx->pd, ctx->images_out, OUTSTANDING_REQUESTS * SQR(IMG_DIMENSION), IBV_ACCESS_LOCAL_WRITE); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for output images\n"); exit(1); } /* TODO register additional memory regions for CPU-GPU queues */ ctx->mr_in_queues = ibv_reg_mr(ctx->pd, ctx->QinHost, sizeof(Q)*ctx->tblocks, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ); if (!ctx->mr_in_queues) { printf("ibv_reg_mr() failed for in queue\n"); // TODO REMOVE exit(1); } ctx->mr_out_queues = ibv_reg_mr(ctx->pd, ctx->QoutHost, sizeof(Q)*ctx->tblocks, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ); if (!ctx->mr_images_out) { printf("ibv_reg_mr() failed for out queue\n"); // TODO REMOVE exit(1); } /* create completion queue (CQ). We'll use same CQ for both send and receive parts of the QP */ ctx->cq = ibv_create_cq(ctx->context, 2 * OUTSTANDING_REQUESTS, NULL, NULL, 0); /* create a CQ with place for two completions per request */ if (!ctx->cq) { printf("ERROR: ibv_create_cq() failed\n"); exit(1); } /* create QP */ struct ibv_qp_init_attr qp_init_attr; memset(&qp_init_attr, 0, sizeof(struct ibv_qp_init_attr)); qp_init_attr.send_cq = ctx->cq; qp_init_attr.recv_cq = ctx->cq; qp_init_attr.qp_type = IBV_QPT_RC; /* we'll use RC transport service, which supports RDMA */ qp_init_attr.cap.max_send_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in SQ per request. that's enough for us */ qp_init_attr.cap.max_recv_wr = OUTSTANDING_REQUESTS; /* max of 1 WQE in-flight in RQ per request. 
that's enough for us */ qp_init_attr.cap.max_send_sge = 1; /* 1 SGE in each send WQE */ qp_init_attr.cap.max_recv_sge = 1; /* 1 SGE in each recv WQE */ ctx->qp = ibv_create_qp(ctx->pd, &qp_init_attr); if (!ctx->qp) { printf("ERROR: ibv_create_qp() failed\n"); exit(1); } } void exchange_parameters(server_context *ctx, ib_info_t *client_info) { /* ok, before we continue we need to get info about the client' QP, and send it info about ours. * namely: QP number, and LID. * we'll use the TCP socket for that */ /* first query port for its LID (L2 address) */ int ret; struct ibv_port_attr port_attr; ret = ibv_query_port(ctx->context, IB_PORT_SERVER, &port_attr); if (ret) { printf("ERROR: ibv_query_port() failed\n"); exit(1); } /* now send our info to client */ struct ib_info_t my_info; my_info.lid = port_attr.lid; my_info.qpn = ctx->qp->qp_num; /* TODO add additional server rkeys / addresses here if needed */ my_info.tblocks = ctx->tblocks; my_info.inQaddr = ctx->mr_in_queues->addr; my_info.outQaddr = ctx->mr_out_queues->addr; my_info.rkeyIn = ctx->mr_in_queues->rkey; my_info.rkeyOut = ctx->mr_out_queues->rkey; ret = send(ctx->socket_fd, &my_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("send"); exit(1); } /* get client's info */ recv(ctx->socket_fd, client_info, sizeof(struct ib_info_t), 0); if (ret < 0) { perror("recv"); exit(1); } /* we don't need TCP anymore. 
kill the socket */ close(ctx->socket_fd); close(ctx->listen_fd); ctx->socket_fd = ctx->listen_fd = 0; } /* Post a receive buffer of the given index (from the requests array) to the receive queue */ void post_recv(server_context *ctx, int index) { struct ibv_recv_wr recv_wr = {}; /* this is the receive work request (the verb's representation for receive WQE) */ ibv_sge sgl; recv_wr.wr_id = index; sgl.addr = (uintptr_t)&ctx->requests[index]; sgl.length = sizeof(ctx->requests[0]); sgl.lkey = ctx->mr_requests->lkey; recv_wr.sg_list = &sgl; recv_wr.num_sge = 1; if (ibv_post_recv(ctx->qp, &recv_wr, NULL)) { printf("ERROR: ibv_post_recv() failed\n"); exit(1); } } void connect_qp(server_context *ctx, ib_info_t *client_info) { /* this is a multi-phase process, moving the state machine of the QP step by step * until we are ready */ struct ibv_qp_attr qp_attr; /*QP state: RESET -> INIT */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_INIT; qp_attr.pkey_index = 0; qp_attr.port_num = IB_PORT_SERVER; qp_attr.qp_access_flags = IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ; /* we'll allow client to RDMA write and read on this QP */ int ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS); if (ret) { printf("ERROR: ibv_modify_qp() to INIT failed\n"); exit(1); } /*QP: state: INIT -> RTR (Ready to Receive) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTR; qp_attr.path_mtu = IBV_MTU_4096; qp_attr.dest_qp_num = client_info->qpn; /* qp number of client */ qp_attr.rq_psn = 0 ; qp_attr.max_dest_rd_atomic = 1; /* max in-flight RDMA reads */ qp_attr.min_rnr_timer = 12; qp_attr.ah_attr.is_global = 0; /* No Network Layer (L3) */ qp_attr.ah_attr.dlid = client_info->lid; /* LID (L2 Address) of client */ qp_attr.ah_attr.sl = 0; qp_attr.ah_attr.src_path_bits = 0; qp_attr.ah_attr.port_num = IB_PORT_SERVER; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_AV | 
IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER); if (ret) { printf("ERROR: ibv_modify_qp() to RTR failed\n"); exit(1); } /*QP: state: RTR -> RTS (Ready to Send) */ memset(&qp_attr, 0, sizeof(struct ibv_qp_attr)); qp_attr.qp_state = IBV_QPS_RTS; qp_attr.sq_psn = 0; qp_attr.timeout = 14; qp_attr.retry_cnt = 7; qp_attr.rnr_retry = 7; qp_attr.max_rd_atomic = 1; ret = ibv_modify_qp(ctx->qp, &qp_attr, IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_SQ_PSN | IBV_QP_MAX_QP_RD_ATOMIC); if (ret) { printf("ERROR: ibv_modify_qp() to RTS failed\n"); exit(1); } /* now let's populate the receive QP with recv WQEs */ for (int i = 0; i < OUTSTANDING_REQUESTS; i++) { post_recv(ctx, i); } } void event_loop(server_context *ctx) { /* so the protocol goes like this: * 1. we'll wait for a CQE indicating that we got an Send request from the client. * this tells us we have new work to do. The wr_id we used in post_recv tells us * where the request is. * 2. now we send an RDMA Read to the client to retrieve the request. * we will get a completion indicating the read has completed. * 3. we process the request on the GPU. * 4. upon completion, we send an RDMA Write with immediate to the client with * the results. 
*/ struct ibv_send_wr send_wr; struct ibv_send_wr *bad_send_wr; rpc_request* req; uchar *img_in; uchar *img_out; ibv_sge sgl; bool terminate = false; while (!terminate) { /*step 1: poll for CQE */ struct ibv_wc wc; int ncqes; do { ncqes = ibv_poll_cq(ctx->cq, 1, &wc); } while (ncqes == 0); if (ncqes < 0) { printf("ERROR: ibv_poll_cq() failed\n"); exit(1); } if (wc.status != IBV_WC_SUCCESS) { printf("ERROR: got CQE with error '%s' (%d) (line %d)\n", ibv_wc_status_str(wc.status), wc.status, __LINE__); exit(1); } switch (wc.opcode) { case IBV_WC_RECV: /* Received a new request from the client */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; /* Terminate signal */ if (req->request_id == -1) { printf("Terminating...\n"); terminate = true; break; } if (ctx->mode != MODE_RPC_SERVER) { printf("Got client RPC request when running in queue mode.\n"); exit(1); } /* send RDMA Read to client to read the input */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; sgl.addr = (uintptr_t)img_in; sgl.length = req->input_length; sgl.lkey = ctx->mr_images_in->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_RDMA_READ; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->input_addr; send_wr.wr.rdma.rkey = req->input_rkey; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_READ: /* Completed RDMA read for a request */ req = &ctx->requests[wc.wr_id]; img_in = &ctx->images_in[wc.wr_id * SQR(IMG_DIMENSION)]; img_out = &ctx->images_out[wc.wr_id * SQR(IMG_DIMENSION)]; process_image_on_gpu(img_in, img_out); /* send RDMA Write with immediate to client with the response */ memset(&send_wr, 0, sizeof(struct ibv_send_wr)); send_wr.wr_id = wc.wr_id; ibv_sge sgl; sgl.addr = (uintptr_t)img_out; sgl.length = req->output_length; sgl.lkey = ctx->mr_images_out->lkey; send_wr.sg_list = &sgl; send_wr.num_sge = 1; 
send_wr.opcode = IBV_WR_RDMA_WRITE_WITH_IMM; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr.rdma.remote_addr = req->output_addr; send_wr.wr.rdma.rkey = req->output_rkey; send_wr.imm_data = req->request_id; if (ibv_post_send(ctx->qp, &send_wr, &bad_send_wr)) { printf("ERROR: ibv_post_send() failed\n"); exit(1); } break; case IBV_WC_RDMA_WRITE: /* Completed RDMA Write - reuse buffers for receiving the next requests */ post_recv(ctx, wc.wr_id); break; default: printf("Unexpected completion\n"); assert(false); } } } void teardown_context(server_context *ctx) { /* cleanup */ ibv_destroy_qp(ctx->qp); ibv_destroy_cq(ctx->cq); ibv_dereg_mr(ctx->mr_requests); ibv_dereg_mr(ctx->mr_images_in); ibv_dereg_mr(ctx->mr_images_out); /* TODO destroy the additional server MRs here if needed */ ibv_dereg_mr(ctx->mr_in_queues); ibv_dereg_mr(ctx->mr_out_queues); CUDA_CHECK( cudaFreeHost(ctx->QinHost) ); CUDA_CHECK( cudaFreeHost(ctx->QoutHost) ); ibv_dealloc_pd(ctx->pd); ibv_close_device(ctx->context); } int main(int argc, char *argv[]) { server_context ctx; parse_arguments(argc, argv, &ctx.mode, &ctx.tcp_port); if (!ctx.tcp_port) { srand(time(NULL)); ctx.tcp_port = TCP_PORT_OFFSET + (rand() % TCP_PORT_RANGE); /* to avoid conflicts with other users of the machine */ } /* Initialize memory and CUDA resources */ allocate_memory(&ctx); /* Create a TCP connection with the client to exchange InfiniBand parameters */ tcp_connection(&ctx); /* now that client has connected to us via TCP we'll open up some Infiniband resources and send it the parameters */ initialize_verbs(&ctx); /* exchange InfiniBand parameters with the client */ ib_info_t client_info; exchange_parameters(&ctx, &client_info); /* now need to connect the QP to the client's QP. 
*/ connect_qp(&ctx, &client_info); if (ctx.mode == MODE_QUEUE) { /* TODO run the GPU persistent kernel from hw2, for 1024 threads per block */ unsigned int tblocks = getTBlocksAmnt(1024, 2*4*256+256); gpu_process_image_pc<<<tblocks,1024>>>(ctx.QinDev,ctx.QoutDev); } /* now finally we get to the actual work, in the event loop */ /* The event loop can be used for queue mode for the termination message */ event_loop(&ctx); printf("Done\n"); teardown_context(&ctx); return 0; }
f9c313a3dd485cce34693113118c231ae92bdafb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "diffusion3d_cuda.h" #include <assert.h> #include <stdio.h> #define CUDA_SAFE_CALL(c) \ do { \ assert(c == hipSuccess); \ } while (0) namespace diffusion3d { #if __CUDA_ARCH__ >= 350 #define LDG(x) __ldg(&(x)) #else #define LDG(x) (x) #endif //#define GET(x) LDG(x) #define GET(x) (x) #define bdimx (BLOCK_X) #define bdimy (BLOCK_Y) #define SHIFT3(x, y, z) x = y; y = z #define SHIFT4(x, y, z, k) x = y; y = z; z = k #define diffusion_backward() \ do { \ sb[ps] = s2; \ __syncthreads(); \ f2[p-xy] = cc * s2 \ + cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \ + cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \ } while (0) // Temporal blocking // z blocking // sperate warp for diagonal points __global__ void diffusion_kernel_shared6(REAL *f1, REAL *f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { extern __shared__ REAL sb[]; const int sbx = bdimx+4; const int tidx = threadIdx.x % bdimx; const int tidy = threadIdx.x / bdimx - 1; int i = bdimx * blockIdx.x + tidx; int j = bdimy * blockIdx.y + tidy; j = (j < 0) ? 0 : j; // max(j, 0) j = (j == ny) ? ny - 1 : j; // min(j, ny-1) int xy = nx * ny; const int block_z = nz / gridDim.z; int k = (blockIdx.z == 0) ? 0: block_z * blockIdx.z - 1; const int k_end = (blockIdx.z == gridDim.z-1) ? nz: block_z * (blockIdx.z + 1) + 1; int p = i + j * nx + k *xy; int ps = tidx+2 + (tidy+1) * sbx; if (tidy == -1) { int s = (j == 0) ? 0 : -nx; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s2, s3; s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? 
GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else if (tidy == bdimy) { int n = (j == ny-1) ? 0 : nx; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s2, s3; s2 = s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else if (tidy >= 0 && tidy < bdimy) { int sb_s = (j == 0) ? 0: -sbx; int sb_n = (j == ny-1) ? 0: sbx; int sb_w = (i == 0) ? 0: -1; int sb_e = (i == nx-1) ? 0: 1; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s1, s2, s3; s2 = s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? 
GET(f1[p+xy]) : t3; sb[ps] = t2; SHIFT3(s1, s2, s3); __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; SHIFT3(s1, s2, s3); __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; __syncthreads(); diffusion_backward(); __syncthreads(); p += xy; } if (k == nz) { SHIFT3(s1, s2, s3); diffusion_backward(); } } else if (tidx < 32 && tidy == bdimy + 1) { // horizontal halo int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (bdimx + 2); int yoffset = (tidx >> 2) + 1; yoffset = (yoffset >= (bdimy+1)) ? bdimy : yoffset; i = bdimx * blockIdx.x - 2 + xoffset; i = (i < 0) ? 0 : i; i = (i >= nx) ? nx - 1 : i; j = bdimy * blockIdx.y -1 + yoffset; j = (j < 0) ? 0 : j; // max(j, 0) j = (j >= ny) ? ny - 1 : j; // min(j, ny-1) int s = -sbx; int n = sbx; int w = (xoffset == 0) ? 0 : -1; int e = (xoffset == sbx-1) ? 0 : 1; p = i + j * nx + k * xy; ps = xoffset + yoffset * sbx; float t2 = LDG(f1[p]); float t1 = (k == 0) ? t2 : LDG(f1[p-xy]); float t3 = (k < nz-1) ? LDG(f1[p+xy]) : t2; float t4 = (k < nz-2) ? 
LDG(f1[p+xy*2]) : t3; sb[ps] = t2; __syncthreads(); float s2, s3; s2 = s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; if (k != 1) { SHIFT4(t1, t2, t3, t4); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; } #pragma unroll for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t4; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; ++k; SHIFT4(t1, t2, t3, t4); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; ++k; if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else { //const int tidx2 = tidx & 31; // 2nd warp int xoffset = 1 + (tidx & 1) * (bdimx + 1); int yoffset = ((tidx & 2) >> 1) * (bdimy + 1); i = bdimx * blockIdx.x - 2 + xoffset; i = (i < 0) ? 0 : i; i = (i >= nx) ? nx - 1 : i; j = bdimy * blockIdx.y -1 + yoffset; j = (j < 0) ? 0 : j; // max(j, 0) j = (j >= ny) ? 
ny - 1 : j; // min(j, ny-1) p = i + j * nx + k * xy; ps = xoffset + yoffset * sbx; float t2, t3, t4; //bool active = tidx2 < 4; const bool active = 1; if (active) { t2 = LDG(f1[p]); t3 = LDG(f1[p+xy]); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); p += xy; ++k; if (k != 1) { SHIFT3(t2, t3, t4); if (active) { t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); p += xy; ++k; } #pragma unroll for (; k < k_end-2; ++k) { SHIFT3(t2, t3, t4); if (active) { t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; } SHIFT3(t2, t3, t4); if (active) { sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; ++k; t2 = t3; if (active) { sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; if (k == nz) { __syncthreads(); } } return; } void Diffusion3DCUDAShared6::RunKernel(int count) { int flag = 0; size_t s = sizeof(REAL) * nx_ * ny_ * nz_; CUDA_SAFE_CALL(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice)); assert(count % 2 == 0); //dim3 block_dim(bdimx * bdimy + 32); // + 1 warp dim3 block_dim(bdimx * (bdimy+2) + (32*2)); dim3 grid_dim(nx_ / bdimx, ny_ / bdimy, grid_z_); #pragma omp parallel num_threads(2) shared(flag) { if (omp_get_thread_num() == 0) { power = GetPowerGPU(&flag, 0); } else { #pragma omp barrier CUDA_SAFE_CALL(hipEventRecord(ev1_)); for (int i = 0; i < count; i+=2) { hipLaunchKernelGGL(( diffusion_kernel_shared6), dim3(grid_dim), dim3(block_dim), (bdimx+4)*(bdimy+2)*sizeof(float), 0, f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CUDA_SAFE_CALL(hipEventRecord(ev2_)); CUDA_SAFE_CALL(hipDeviceSynchronize()); flag = 1; } } CUDA_SAFE_CALL(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost)); return; } void Diffusion3DCUDAShared6::InitializeBenchmark() { Diffusion3DCUDA::InitializeBenchmark(); 
CUDA_SAFE_CALL(hipFuncSetCacheConfig(diffusion_kernel_shared6, hipFuncCachePreferShared)); } }
f9c313a3dd485cce34693113118c231ae92bdafb.cu
#include "diffusion3d_cuda.h" #include <assert.h> #include <stdio.h> #define CUDA_SAFE_CALL(c) \ do { \ assert(c == cudaSuccess); \ } while (0) namespace diffusion3d { #if __CUDA_ARCH__ >= 350 #define LDG(x) __ldg(&(x)) #else #define LDG(x) (x) #endif //#define GET(x) LDG(x) #define GET(x) (x) #define bdimx (BLOCK_X) #define bdimy (BLOCK_Y) #define SHIFT3(x, y, z) x = y; y = z #define SHIFT4(x, y, z, k) x = y; y = z; z = k #define diffusion_backward() \ do { \ sb[ps] = s2; \ __syncthreads(); \ f2[p-xy] = cc * s2 \ + cw * sb[ps+sb_w] + ce * sb[ps+sb_e] \ + cs * sb[ps+sb_s] + cn * sb[ps+sb_n] + cb*s1 + ct*s3; \ } while (0) // Temporal blocking // z blocking // sperate warp for diagonal points __global__ void diffusion_kernel_shared6(REAL *f1, REAL *f2, int nx, int ny, int nz, REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) { extern __shared__ REAL sb[]; const int sbx = bdimx+4; const int tidx = threadIdx.x % bdimx; const int tidy = threadIdx.x / bdimx - 1; int i = bdimx * blockIdx.x + tidx; int j = bdimy * blockIdx.y + tidy; j = (j < 0) ? 0 : j; // max(j, 0) j = (j == ny) ? ny - 1 : j; // min(j, ny-1) int xy = nx * ny; const int block_z = nz / gridDim.z; int k = (blockIdx.z == 0) ? 0: block_z * blockIdx.z - 1; const int k_end = (blockIdx.z == gridDim.z-1) ? nz: block_z * (blockIdx.z + 1) + 1; int p = i + j * nx + k *xy; int ps = tidx+2 + (tidy+1) * sbx; if (tidy == -1) { int s = (j == 0) ? 0 : -nx; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s2, s3; s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? 
GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * GET(f1[p+s]) + cn * sb[ps+sbx] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else if (tidy == bdimy) { int n = (j == ny-1) ? 0 : nx; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s2, s3; s2 = s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; p += xy; __syncthreads(); ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx] + cn * GET(f1[p+n]) + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else if (tidy >= 0 && tidy < bdimy) { int sb_s = (j == 0) ? 0: -sbx; int sb_n = (j == ny-1) ? 0: sbx; int sb_w = (i == 0) ? 0: -1; int sb_e = (i == nx-1) ? 0: 1; float t2 = GET(f1[p]); float t1 = (k == 0) ? t2 : GET(f1[p-xy]); float t3 = (k < nz-1) ? GET(f1[p+xy]) : t2; sb[ps] = t2; __syncthreads(); float s1, s2, s3; s2 = s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); ++k; if (k != 1) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? 
GET(f1[p+xy]) : t3; sb[ps] = t2; SHIFT3(s1, s2, s3); __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; p += xy; __syncthreads(); ++k; } for (; k < k_end; ++k) { SHIFT3(t1, t2, t3); t3 = (k < nz-1) ? GET(f1[p+xy]) : t3; sb[ps] = t2; SHIFT3(s1, s2, s3); __syncthreads(); s3 = cc * t2 + cw * sb[ps-1] + ce * sb[ps+1] + cs * sb[ps-sbx]+ cn * sb[ps+sbx] + cb * t1 + ct * t3; __syncthreads(); diffusion_backward(); __syncthreads(); p += xy; } if (k == nz) { SHIFT3(s1, s2, s3); diffusion_backward(); } } else if (tidx < 32 && tidy == bdimy + 1) { // horizontal halo int xoffset = (tidx & 1) + ((tidx & 2) >> 1) * (bdimx + 2); int yoffset = (tidx >> 2) + 1; yoffset = (yoffset >= (bdimy+1)) ? bdimy : yoffset; i = bdimx * blockIdx.x - 2 + xoffset; i = (i < 0) ? 0 : i; i = (i >= nx) ? nx - 1 : i; j = bdimy * blockIdx.y -1 + yoffset; j = (j < 0) ? 0 : j; // max(j, 0) j = (j >= ny) ? ny - 1 : j; // min(j, ny-1) int s = -sbx; int n = sbx; int w = (xoffset == 0) ? 0 : -1; int e = (xoffset == sbx-1) ? 0 : 1; p = i + j * nx + k * xy; ps = xoffset + yoffset * sbx; float t2 = LDG(f1[p]); float t1 = (k == 0) ? t2 : LDG(f1[p-xy]); float t3 = (k < nz-1) ? LDG(f1[p+xy]) : t2; float t4 = (k < nz-2) ? 
LDG(f1[p+xy*2]) : t3; sb[ps] = t2; __syncthreads(); float s2, s3; s2 = s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; if (k != 1) { SHIFT4(t1, t2, t3, t4); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); p += xy; ++k; } #pragma unroll for (; k < k_end-2; ++k) { SHIFT4(t1, t2, t3, t4); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; } SHIFT4(t1, t2, t3, t4); t4 = (k < nz-2) ? LDG(f1[p+xy*2]) : t4; sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; ++k; SHIFT4(t1, t2, t3, t4); sb[ps] = t2; s2 = s3; __syncthreads(); s3 = cc * t2 + cw * sb[ps+w] + ce * sb[ps+e] + cs * sb[ps+s] + cn * sb[ps+n] + cb*t1 + ct*t3; __syncthreads(); sb[ps] = s2; __syncthreads(); __syncthreads(); p += xy; ++k; if (k == nz) { s2 = s3; sb[ps] = s2; __syncthreads(); } } else { //const int tidx2 = tidx & 31; // 2nd warp int xoffset = 1 + (tidx & 1) * (bdimx + 1); int yoffset = ((tidx & 2) >> 1) * (bdimy + 1); i = bdimx * blockIdx.x - 2 + xoffset; i = (i < 0) ? 0 : i; i = (i >= nx) ? nx - 1 : i; j = bdimy * blockIdx.y -1 + yoffset; j = (j < 0) ? 0 : j; // max(j, 0) j = (j >= ny) ? 
ny - 1 : j; // min(j, ny-1) p = i + j * nx + k * xy; ps = xoffset + yoffset * sbx; float t2, t3, t4; //bool active = tidx2 < 4; const bool active = 1; if (active) { t2 = LDG(f1[p]); t3 = LDG(f1[p+xy]); t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); p += xy; ++k; if (k != 1) { SHIFT3(t2, t3, t4); if (active) { t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); p += xy; ++k; } #pragma unroll for (; k < k_end-2; ++k) { SHIFT3(t2, t3, t4); if (active) { t4 = LDG(f1[p+xy*2]); sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; } SHIFT3(t2, t3, t4); if (active) { sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; ++k; t2 = t3; if (active) { sb[ps] = t2; } __syncthreads(); __syncthreads(); __syncthreads(); __syncthreads(); p += xy; if (k == nz) { __syncthreads(); } } return; } void Diffusion3DCUDAShared6::RunKernel(int count) { int flag = 0; size_t s = sizeof(REAL) * nx_ * ny_ * nz_; CUDA_SAFE_CALL(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice)); assert(count % 2 == 0); //dim3 block_dim(bdimx * bdimy + 32); // + 1 warp dim3 block_dim(bdimx * (bdimy+2) + (32*2)); dim3 grid_dim(nx_ / bdimx, ny_ / bdimy, grid_z_); #pragma omp parallel num_threads(2) shared(flag) { if (omp_get_thread_num() == 0) { power = GetPowerGPU(&flag, 0); } else { #pragma omp barrier CUDA_SAFE_CALL(cudaEventRecord(ev1_)); for (int i = 0; i < count; i+=2) { diffusion_kernel_shared6<<<grid_dim, block_dim, (bdimx+4)*(bdimy+2)*sizeof(float)>>> (f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_); REAL *t = f1_d_; f1_d_ = f2_d_; f2_d_ = t; } CUDA_SAFE_CALL(cudaEventRecord(ev2_)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); flag = 1; } } CUDA_SAFE_CALL(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost)); return; } void Diffusion3DCUDAShared6::InitializeBenchmark() { Diffusion3DCUDA::InitializeBenchmark(); CUDA_SAFE_CALL(cudaFuncSetCacheConfig(diffusion_kernel_shared6, 
cudaFuncCachePreferShared)); } }
004d16e7ebac95bd53e0b4297cf03cd04ce9c5f4.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "bitmask/bitmask.h" #include "cudf_util/detail.h" #include "util/cuda_helper.h" #include <cudf/reduction.hpp> #include <cudf/column/column_view.hpp> #include <thrust/transform.h> #include <rmm/exec_policy.hpp> namespace legate { namespace pandas { void Bitmask::set_all_valid(hipStream_t stream) { hipMemsetAsync(bitmask, 0x01, num_elements, stream); } void Bitmask::clear(hipStream_t stream) { hipMemsetAsync(bitmask, 0x00, num_elements, stream); } size_t Bitmask::count_unset_bits(hipStream_t stream) const { cudf::column_view boolmask{ cudf::data_type{cudf::type_id::UINT8}, static_cast<cudf::size_type>(num_elements), bitmask}; auto type_id = cudf::data_type{cudf::type_to_id<int32_t>()}; rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource(); auto out = cudf::detail::reduce(boolmask, cudf::make_sum_aggregation(), type_id, stream, mr); auto null_count = static_cast<cudf::scalar_type_t<int32_t> *>(out.get())->value(stream); assert(num_elements >= null_count); return num_elements - null_count; } void Bitmask::copy(const Bitmask &target, hipStream_t stream) const { hipMemcpyAsync(target.bitmask, bitmask, num_elements, hipMemcpyDeviceToDevice, stream); } void intersect_bitmasks(Bitmask &out, const Bitmask &in1, const Bitmask &in2, hipStream_t stream) { auto start = thrust::make_counting_iterator<int64_t>(0); auto 
stop = thrust::make_counting_iterator<int64_t>(static_cast<int64_t>(out.num_elements)); thrust::for_each(rmm::exec_policy(stream), start, stop, [out, in1, in2] __device__(auto idx) { out.set(idx, in1.get(idx) && in2.get(idx)); }); } } // namespace pandas } // namespace legate
004d16e7ebac95bd53e0b4297cf03cd04ce9c5f4.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "bitmask/bitmask.h" #include "cudf_util/detail.h" #include "util/cuda_helper.h" #include <cudf/reduction.hpp> #include <cudf/column/column_view.hpp> #include <thrust/transform.h> #include <rmm/exec_policy.hpp> namespace legate { namespace pandas { void Bitmask::set_all_valid(cudaStream_t stream) { cudaMemsetAsync(bitmask, 0x01, num_elements, stream); } void Bitmask::clear(cudaStream_t stream) { cudaMemsetAsync(bitmask, 0x00, num_elements, stream); } size_t Bitmask::count_unset_bits(cudaStream_t stream) const { cudf::column_view boolmask{ cudf::data_type{cudf::type_id::UINT8}, static_cast<cudf::size_type>(num_elements), bitmask}; auto type_id = cudf::data_type{cudf::type_to_id<int32_t>()}; rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource(); auto out = cudf::detail::reduce(boolmask, cudf::make_sum_aggregation(), type_id, stream, mr); auto null_count = static_cast<cudf::scalar_type_t<int32_t> *>(out.get())->value(stream); assert(num_elements >= null_count); return num_elements - null_count; } void Bitmask::copy(const Bitmask &target, cudaStream_t stream) const { cudaMemcpyAsync(target.bitmask, bitmask, num_elements, cudaMemcpyDeviceToDevice, stream); } void intersect_bitmasks(Bitmask &out, const Bitmask &in1, const Bitmask &in2, cudaStream_t stream) { auto start = thrust::make_counting_iterator<int64_t>(0); auto stop = 
thrust::make_counting_iterator<int64_t>(static_cast<int64_t>(out.num_elements)); thrust::for_each(rmm::exec_policy(stream), start, stop, [out, in1, in2] __device__(auto idx) { out.set(idx, in1.get(idx) && in2.get(idx)); }); } } // namespace pandas } // namespace legate
a0e5a0d6b3a8e0c71acc419bae3828fecb4bbd04.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "graph.h" #include "sssp-coalescing.h" #include "timer.h" #include <queue> #include <omp.h> #include <unordered_map> //#define INF 1000000000 __constant__ unsigned INF = 1000000000; const int INF_NEW = 1000000; __global__ void initialize(Graph G, unsigned* d_dist, uint64_t nnodes){ uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; if(gid < nnodes) { d_dist[gid] = INF; } } __device__ bool processedge(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode, unsigned i, uint64_t& dst) { dst = G. getDest(worknode,i); // get the i-th neighbor of worknode if(dst >= nnodes) return false; unsigned wt = G.getWt(worknode, i); // get edge-weight of the i-th edge if(wt >= INF) return false; unsigned altdist = d_dist[worknode] + wt; if(altdist < d_dist[dst]) { // a possible site for thread divergence unsigned olddist = atomicMin(&d_dist[dst], altdist); if(altdist < olddist) return true; // dist is updated to a lower value (another possible site for thread divergence) } return false; } __device__ bool processnode(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode) { if(worknode >= nnodes) return false; bool changed = false; // thread-local unsigned outDegree = G.getDegree(worknode); for(unsigned i=0; i<outDegree; ++i) { uint64_t dst = nnodes; unsigned olddist = processedge(G, nnodes, d_dist, worknode, i, dst); if(olddist) changed = true; } return changed; } __global__ void ssspCompute(Graph G, uint64_t nnodes, uint64_t nedges, unsigned* d_dist, bool* d_changed) { uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; uint64_t src = gid; // node under consideration if(processnode(G, nnodes, d_dist, src)) *d_changed = true; } void sssp_parallel(Graph& G, unsigned* h_dist, unsigned* d_dist, uint64_t _src, unsigned num_blocks, unsigned block_size) { hipLaunchKernelGGL(( initialize), dim3(num_blocks), dim3(block_size), 
0, 0, G, d_dist, G.h_nnodes); hipDeviceSynchronize(); bool h_changed, *d_changed; gpuErrchk(hipMalloc(&d_changed,sizeof(bool))); unsigned zero = 0; // the distance zero from source uint64_t src = _src; // setting the source vertex by specifying the node-id. gpuErrchk(hipMemcpy(&d_dist[src],&zero, sizeof(zero), hipMemcpyHostToDevice)); do { h_changed = false; gpuErrchk(hipMemcpy(d_changed, &h_changed, sizeof(h_changed), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( ssspCompute) , dim3(num_blocks), dim3(block_size), 0, 0, G, G.h_nnodes, G.h_nedges, d_dist, d_changed ); hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError() ); gpuErrchk(hipMemcpy(&h_changed, d_changed, sizeof(h_changed), hipMemcpyDeviceToHost)); } while(h_changed); gpuErrchk(hipMemcpy(h_dist, d_dist, G.h_nnodes * sizeof(unsigned), hipMemcpyDeviceToHost)); } __global__ void levelInit(int * d_level, uint64_t nnodes) { uint64_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= nnodes) return; d_level[gid] = INF_NEW; } __global__ void populateDegree(Graph G,int * d_nodeDegree, uint64_t* d_nodeId, uint64_t nnodes) { uint64_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= nnodes) return; d_nodeDegree[gid] = G.getDegree(gid); d_nodeId[gid] = gid; } void merge(int* a, int* b, uint64_t* c, uint64_t * c_aux, uint64_t lo, uint64_t mid, uint64_t hi, uint64_t n) { if (mid >= n) return; if (hi > n) hi = n; int i = lo, j = mid, ii = lo, jj = mid, k; for (k = lo; k < hi; k++) { if (i == mid) { b[k] = a[j++]; c_aux[k] = c[jj++]; } else if (j == hi) { b[k] = a[i++]; c_aux[k] = c[ii++]; } else if (a[j] > a[i]) { b[k] = a[j++]; c_aux[k] = c[jj++]; }// '>' means descending order else { b[k] = a[i++]; c_aux[k] = c[ii++]; } } // copy back for (k = lo; k < hi; k++) { a[k] = b[k]; c[k] = c_aux[k]; } } void Merge_Sort_Par(int *a,int *b,uint64_t *c, uint64_t* c_aux, uint64_t n) //, int nThreads) { omp_set_num_threads(16); uint64_t blockSize, start; for(blockSize=1;blockSize<n; blockSize=blockSize+blockSize){ 
#pragma omp parallel for private(start) schedule(static) for(start=0; start < n; start += blockSize + blockSize){ // std::cout << "Get num threads " << omp_get_num_threads() << std::endl; merge(a, b, c, c_aux, start, start+blockSize, start + 2*blockSize, n); } } } /*renumber and replicate the nodes */ void renumber_replicate(Graph& G) { /* Step-1: renumber the nodes */ // store the nodes' degrees in an array and sort the array in descending order int * h_nodeDegree = (int*) malloc(G.h_nnodes*sizeof(int)); int * h_nodeDegree_aux = (int*) malloc(G.h_nnodes*sizeof(int)); // this is for the merge sort int * d_nodeDegree; uint64_t * h_nodeId = (uint64_t*) malloc(G.h_nnodes*sizeof(uint64_t)); uint64_t * h_nodeId_aux = (uint64_t*) malloc(G.h_nnodes*sizeof(uint64_t)); uint64_t * d_nodeId; gpuErrchk(hipMalloc(&d_nodeId,G.h_nnodes*sizeof(uint64_t))); gpuErrchk(hipMalloc(&d_nodeDegree,G.h_nnodes*sizeof(int))); unsigned blockSize = 256; unsigned numBlocks = (G.h_nnodes+blockSize-1)/blockSize; CPUTimer cputimer; cputimer.Start(); hipLaunchKernelGGL(( populateDegree), dim3(numBlocks), dim3(blockSize), 0, 0, G, d_nodeDegree,d_nodeId,G.h_nnodes); gpuErrchk(hipMemcpy(h_nodeDegree, d_nodeDegree, G.h_nnodes*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_nodeId, d_nodeId, G.h_nnodes*sizeof(uint64_t), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(d_nodeDegree)); gpuErrchk(hipFree(d_nodeId)); // sort the nodes in descending order and maintain another array to store the corresponding node id Merge_Sort_Par(h_nodeDegree, h_nodeDegree_aux, h_nodeId, h_nodeId_aux, G.h_nnodes); cputimer.Stop(); free(h_nodeDegree_aux); free(h_nodeId_aux); std::cout << "Time elapsed = " << cputimer.Elapsed() << " second" << std::endl; #if 1 int *h_level = (int*) malloc(G.h_nnodes*sizeof(int)); int * d_level; gpuErrchk(hipMalloc(&d_level,G.h_nnodes*sizeof(int))); cputimer.Start(); hipLaunchKernelGGL(( levelInit), dim3(numBlocks),dim3(blockSize), 0, 0, d_level,G.h_nnodes); gpuErrchk(hipMemcpy(h_level, 
d_level, G.h_nnodes*sizeof(int), hipMemcpyDeviceToHost)); // initializing h_level for the first iteration bool h_changed, *d_changed; gpuErrchk(hipMalloc(&d_changed,sizeof(bool))); int zero = 0; // the distance zero from source uint64_t src; for(uint64_t j = 0; j < G.h_nnodes; ++j) { src = h_nodeId[j]; if(h_nodeDegree[j] == 0) { std::cout << "Nodes with degree 0 start at: " << j << std::endl; std::cout << "Number of nodes with degree 0 : " << G.h_nnodes-1-j << std::endl; break; } if(h_level[src] == INF_NEW ) { gpuErrchk(hipMemcpy(&d_level[src],&zero, sizeof(zero), hipMemcpyHostToDevice)); do { h_changed = false; gpuErrchk(hipMemcpy(d_changed, &h_changed, sizeof(h_changed), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( getLevel), dim3(numBlocks),dim3(blockSize), 0, 0, G, G.h_nnodes, G.h_nedges, d_level, d_changed); // making it true all the time, so getting stuck in an infinite loop hipDeviceSynchronize(); gpuErrchk(hipPeekAtLastError() ); gpuErrchk(hipMemcpy(&h_changed, d_changed, sizeof(h_changed), hipMemcpyDeviceToHost)); } while(h_changed); gpuErrchk(hipMemcpy(h_level, d_level, G.h_nnodes*sizeof(int), hipMemcpyDeviceToHost)); } } // assigning level 0 to nodes that have not been reached so far.. 
i.e., these are unreachable for(uint64_t s=0; s<G.h_nnodes; ++s) { // h_level[s] = (h_level[s] != INF_NEW) * h_level[s]; // this is optimal if(h_level[s] == INF_NEW) { h_level[s] = 0; } } cputimer.Stop(); std::cout << "Time elapsed in assigning levels = " << cputimer.Elapsed() << " second" << std::endl; #endif // counting the number of nodes of each type std::unordered_map<int,uint64_t> countPerLevel; // map of level:# nodes at that level for(uint64_t i=0; i<G.h_nnodes; ++i) { countPerLevel[h_level[i]]++; } int chunkSize = 32; // specifying the chunk size std::cout << "Level : #nodes ; #holes" << std::endl; unsigned holeSum = 0; for(auto it=countPerLevel.begin(); it != countPerLevel.end(); ++it) { int temp = chunkSize - ( (it->second) % chunkSize ); std::cout << it->first << " : " << it->second << " ; " << temp << std::endl; holeSum += temp; } std::cout << "total holes : " << holeSum << std::endl; uint64_t * h_newId = (uint64_t*) malloc(sizeof(uint64_t)*G.h_nnodes); // stores the new id of the node, i.e., newId[i] = j means that new id assigned to nodes 'i' is 'j'. // Step-1 : Assign the new id's to the nodes at level 0. 
int maxLevel = 0; uint64_t seqNum = 0; // the new id assigned to the nodes for(uint64_t s = 0; s < G.h_nnodes; ++s) { if(h_level[s] == 0) { h_newId[s] = seqNum++; } else { maxLevel = max(h_level[s], maxLevel); // finding the number of levels in the bfs forest } } // Step-2 : Assign the new id's to the nodes at each level in a level-synchronous manner seqNum = seqNum + ( chunkSize - (seqNum % chunkSize) ); // bump-up seqNum to the next multple of chunkSize // writing output to a file (for correctness check) const char filename[] = "bfs_output.txt"; printf("Writing output to %s\n", filename); FILE *o = fopen(filename, "w"); for(uint64_t i = 0; i < G.h_nnodes; i++) { fprintf(o, "%d: %d\n", i, h_level[i]); } fclose(o); } // end of function __global__ void getLevel(Graph G, uint64_t nnodes, uint64_t nedges, int* d_level, bool* d_changed) { uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; uint64_t src = gid; // node under consideration if(src >= nnodes) return; // exit the kernel unsigned outDegree = G.getDegree(src); for(unsigned i=0; i<outDegree; ++i) { uint64_t dst = G. getDest(src,i); // get the i-th neighbor of src if(dst >= nnodes){ return; } // unsigned wt = 1; // the edge weight is 1 int altdist = d_level[src] + 1; // each edge has weight = 1 if(altdist < d_level[dst]) { // a possible site for thread divergence int olddist = atomicMin(&d_level[dst], altdist); if(altdist < olddist) (*d_changed) = true; // dist is updated to a lower value (another possible site for thread divergence) } } }
a0e5a0d6b3a8e0c71acc419bae3828fecb4bbd04.cu
#include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include "graph.h" #include "sssp-coalescing.h" #include "timer.h" #include <queue> #include <omp.h> #include <unordered_map> //#define INF 1000000000 __constant__ unsigned INF = 1000000000; const int INF_NEW = 1000000; __global__ void initialize(Graph G, unsigned* d_dist, uint64_t nnodes){ uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; if(gid < nnodes) { d_dist[gid] = INF; } } __device__ bool processedge(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode, unsigned i, uint64_t& dst) { dst = G. getDest(worknode,i); // get the i-th neighbor of worknode if(dst >= nnodes) return false; unsigned wt = G.getWt(worknode, i); // get edge-weight of the i-th edge if(wt >= INF) return false; unsigned altdist = d_dist[worknode] + wt; if(altdist < d_dist[dst]) { // a possible site for thread divergence unsigned olddist = atomicMin(&d_dist[dst], altdist); if(altdist < olddist) return true; // dist is updated to a lower value (another possible site for thread divergence) } return false; } __device__ bool processnode(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode) { if(worknode >= nnodes) return false; bool changed = false; // thread-local unsigned outDegree = G.getDegree(worknode); for(unsigned i=0; i<outDegree; ++i) { uint64_t dst = nnodes; unsigned olddist = processedge(G, nnodes, d_dist, worknode, i, dst); if(olddist) changed = true; } return changed; } __global__ void ssspCompute(Graph G, uint64_t nnodes, uint64_t nedges, unsigned* d_dist, bool* d_changed) { uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; uint64_t src = gid; // node under consideration if(processnode(G, nnodes, d_dist, src)) *d_changed = true; } void sssp_parallel(Graph& G, unsigned* h_dist, unsigned* d_dist, uint64_t _src, unsigned num_blocks, unsigned block_size) { initialize<<<num_blocks, block_size>>>(G, d_dist, G.h_nnodes); cudaDeviceSynchronize(); bool h_changed, *d_changed; 
gpuErrchk(cudaMalloc(&d_changed,sizeof(bool))); unsigned zero = 0; // the distance zero from source uint64_t src = _src; // setting the source vertex by specifying the node-id. gpuErrchk(cudaMemcpy(&d_dist[src],&zero, sizeof(zero), cudaMemcpyHostToDevice)); do { h_changed = false; gpuErrchk(cudaMemcpy(d_changed, &h_changed, sizeof(h_changed), cudaMemcpyHostToDevice)); ssspCompute <<<num_blocks, block_size>>> (G, G.h_nnodes, G.h_nedges, d_dist, d_changed ); cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError() ); gpuErrchk(cudaMemcpy(&h_changed, d_changed, sizeof(h_changed), cudaMemcpyDeviceToHost)); } while(h_changed); gpuErrchk(cudaMemcpy(h_dist, d_dist, G.h_nnodes * sizeof(unsigned), cudaMemcpyDeviceToHost)); } __global__ void levelInit(int * d_level, uint64_t nnodes) { uint64_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= nnodes) return; d_level[gid] = INF_NEW; } __global__ void populateDegree(Graph G,int * d_nodeDegree, uint64_t* d_nodeId, uint64_t nnodes) { uint64_t gid = threadIdx.x + blockIdx.x * blockDim.x; if(gid >= nnodes) return; d_nodeDegree[gid] = G.getDegree(gid); d_nodeId[gid] = gid; } void merge(int* a, int* b, uint64_t* c, uint64_t * c_aux, uint64_t lo, uint64_t mid, uint64_t hi, uint64_t n) { if (mid >= n) return; if (hi > n) hi = n; int i = lo, j = mid, ii = lo, jj = mid, k; for (k = lo; k < hi; k++) { if (i == mid) { b[k] = a[j++]; c_aux[k] = c[jj++]; } else if (j == hi) { b[k] = a[i++]; c_aux[k] = c[ii++]; } else if (a[j] > a[i]) { b[k] = a[j++]; c_aux[k] = c[jj++]; }// '>' means descending order else { b[k] = a[i++]; c_aux[k] = c[ii++]; } } // copy back for (k = lo; k < hi; k++) { a[k] = b[k]; c[k] = c_aux[k]; } } void Merge_Sort_Par(int *a,int *b,uint64_t *c, uint64_t* c_aux, uint64_t n) //, int nThreads) { omp_set_num_threads(16); uint64_t blockSize, start; for(blockSize=1;blockSize<n; blockSize=blockSize+blockSize){ #pragma omp parallel for private(start) schedule(static) for(start=0; start < n; start += blockSize + blockSize){ 
// std::cout << "Get num threads " << omp_get_num_threads() << std::endl; merge(a, b, c, c_aux, start, start+blockSize, start + 2*blockSize, n); } } } /*renumber and replicate the nodes */ void renumber_replicate(Graph& G) { /* Step-1: renumber the nodes */ // store the nodes' degrees in an array and sort the array in descending order int * h_nodeDegree = (int*) malloc(G.h_nnodes*sizeof(int)); int * h_nodeDegree_aux = (int*) malloc(G.h_nnodes*sizeof(int)); // this is for the merge sort int * d_nodeDegree; uint64_t * h_nodeId = (uint64_t*) malloc(G.h_nnodes*sizeof(uint64_t)); uint64_t * h_nodeId_aux = (uint64_t*) malloc(G.h_nnodes*sizeof(uint64_t)); uint64_t * d_nodeId; gpuErrchk(cudaMalloc(&d_nodeId,G.h_nnodes*sizeof(uint64_t))); gpuErrchk(cudaMalloc(&d_nodeDegree,G.h_nnodes*sizeof(int))); unsigned blockSize = 256; unsigned numBlocks = (G.h_nnodes+blockSize-1)/blockSize; CPUTimer cputimer; cputimer.Start(); populateDegree<<<numBlocks, blockSize>>>(G, d_nodeDegree,d_nodeId,G.h_nnodes); gpuErrchk(cudaMemcpy(h_nodeDegree, d_nodeDegree, G.h_nnodes*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_nodeId, d_nodeId, G.h_nnodes*sizeof(uint64_t), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(d_nodeDegree)); gpuErrchk(cudaFree(d_nodeId)); // sort the nodes in descending order and maintain another array to store the corresponding node id Merge_Sort_Par(h_nodeDegree, h_nodeDegree_aux, h_nodeId, h_nodeId_aux, G.h_nnodes); cputimer.Stop(); free(h_nodeDegree_aux); free(h_nodeId_aux); std::cout << "Time elapsed = " << cputimer.Elapsed() << " second" << std::endl; #if 1 int *h_level = (int*) malloc(G.h_nnodes*sizeof(int)); int * d_level; gpuErrchk(cudaMalloc(&d_level,G.h_nnodes*sizeof(int))); cputimer.Start(); levelInit<<<numBlocks,blockSize>>>(d_level,G.h_nnodes); gpuErrchk(cudaMemcpy(h_level, d_level, G.h_nnodes*sizeof(int), cudaMemcpyDeviceToHost)); // initializing h_level for the first iteration bool h_changed, *d_changed; 
gpuErrchk(cudaMalloc(&d_changed,sizeof(bool))); int zero = 0; // the distance zero from source uint64_t src; for(uint64_t j = 0; j < G.h_nnodes; ++j) { src = h_nodeId[j]; if(h_nodeDegree[j] == 0) { std::cout << "Nodes with degree 0 start at: " << j << std::endl; std::cout << "Number of nodes with degree 0 : " << G.h_nnodes-1-j << std::endl; break; } if(h_level[src] == INF_NEW ) { gpuErrchk(cudaMemcpy(&d_level[src],&zero, sizeof(zero), cudaMemcpyHostToDevice)); do { h_changed = false; gpuErrchk(cudaMemcpy(d_changed, &h_changed, sizeof(h_changed), cudaMemcpyHostToDevice)); getLevel<<<numBlocks,blockSize>>>(G, G.h_nnodes, G.h_nedges, d_level, d_changed); // making it true all the time, so getting stuck in an infinite loop cudaDeviceSynchronize(); gpuErrchk(cudaPeekAtLastError() ); gpuErrchk(cudaMemcpy(&h_changed, d_changed, sizeof(h_changed), cudaMemcpyDeviceToHost)); } while(h_changed); gpuErrchk(cudaMemcpy(h_level, d_level, G.h_nnodes*sizeof(int), cudaMemcpyDeviceToHost)); } } // assigning level 0 to nodes that have not been reached so far.. 
i.e., these are unreachable for(uint64_t s=0; s<G.h_nnodes; ++s) { // h_level[s] = (h_level[s] != INF_NEW) * h_level[s]; // this is optimal if(h_level[s] == INF_NEW) { h_level[s] = 0; } } cputimer.Stop(); std::cout << "Time elapsed in assigning levels = " << cputimer.Elapsed() << " second" << std::endl; #endif // counting the number of nodes of each type std::unordered_map<int,uint64_t> countPerLevel; // map of level:# nodes at that level for(uint64_t i=0; i<G.h_nnodes; ++i) { countPerLevel[h_level[i]]++; } int chunkSize = 32; // specifying the chunk size std::cout << "Level : #nodes ; #holes" << std::endl; unsigned holeSum = 0; for(auto it=countPerLevel.begin(); it != countPerLevel.end(); ++it) { int temp = chunkSize - ( (it->second) % chunkSize ); std::cout << it->first << " : " << it->second << " ; " << temp << std::endl; holeSum += temp; } std::cout << "total holes : " << holeSum << std::endl; uint64_t * h_newId = (uint64_t*) malloc(sizeof(uint64_t)*G.h_nnodes); // stores the new id of the node, i.e., newId[i] = j means that new id assigned to nodes 'i' is 'j'. // Step-1 : Assign the new id's to the nodes at level 0. 
int maxLevel = 0; uint64_t seqNum = 0; // the new id assigned to the nodes for(uint64_t s = 0; s < G.h_nnodes; ++s) { if(h_level[s] == 0) { h_newId[s] = seqNum++; } else { maxLevel = max(h_level[s], maxLevel); // finding the number of levels in the bfs forest } } // Step-2 : Assign the new id's to the nodes at each level in a level-synchronous manner seqNum = seqNum + ( chunkSize - (seqNum % chunkSize) ); // bump-up seqNum to the next multple of chunkSize // writing output to a file (for correctness check) const char filename[] = "bfs_output.txt"; printf("Writing output to %s\n", filename); FILE *o = fopen(filename, "w"); for(uint64_t i = 0; i < G.h_nnodes; i++) { fprintf(o, "%d: %d\n", i, h_level[i]); } fclose(o); } // end of function __global__ void getLevel(Graph G, uint64_t nnodes, uint64_t nedges, int* d_level, bool* d_changed) { uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x; uint64_t src = gid; // node under consideration if(src >= nnodes) return; // exit the kernel unsigned outDegree = G.getDegree(src); for(unsigned i=0; i<outDegree; ++i) { uint64_t dst = G. getDest(src,i); // get the i-th neighbor of src if(dst >= nnodes){ return; } // unsigned wt = 1; // the edge weight is 1 int altdist = d_level[src] + 1; // each edge has weight = 1 if(altdist < d_level[dst]) { // a possible site for thread divergence int olddist = atomicMin(&d_level[dst], altdist); if(altdist < olddist) (*d_changed) = true; // dist is updated to a lower value (another possible site for thread divergence) } } }
afa58c64154f5bc7832ff5de83b345abef3d0ae6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include "kernel/GenMipMaps.h" #include "cuda/helper_math.h" #include "cuda/cudamemory.h" #include "cuda/cudautil.h" // NOTE // http://www.cse.uaa.alaska.edu/~ssiewert/a490dmis_code/CUDA/cuda_work/samples/2_Graphics/bindlessTexture/bindlessTexture_kernel.cu __global__ void genMipmap( hipSurfaceObject_t mipOutput, hipTextureObject_t mipInput, int32_t imageW, int32_t imageH) { int32_t x = blockIdx.x * blockDim.x + threadIdx.x; int32_t y = blockIdx.y * blockDim.y + threadIdx.y; float px = 1.0 / float(imageW); float py = 1.0 / float(imageH); if ((x < imageW) && (y < imageH)) { // take the average of 4 samples // we are using the normalized access to make sure non-power-of-two textures // behave well when downsized. float4 color = (tex2D<float4>(mipInput, (x + 0) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 1) * py)) + (tex2D<float4>(mipInput, (x + 0) * px, (y + 1) * py)); color /= 4.0f; surf2Dwrite(color, mipOutput, x * sizeof(float4), y); } } namespace idaten { void generateMipMaps( hipMipmappedArray_t mipmapArray, int32_t width, int32_t height, int32_t maxLevel) { int32_t level = 0; //while (width != 1 || height != 1) while (level + 1 < maxLevel) { width /= 2; height /= 2; width = ::max(1, width); height = ::max(1, height); // Copy from. hipArray_t levelFrom; checkCudaErrors(hipGetMipmappedArrayLevel(&levelFrom, mipmapArray, level)); // Copy to. 
hipArray_t levelTo; checkCudaErrors(hipGetMipmappedArrayLevel(&levelTo, mipmapArray, level + 1)); hipExtent levelToSize; checkCudaErrors(hipArrayGetInfo(nullptr, &levelToSize, nullptr, levelTo)); AT_ASSERT(levelToSize.width == width); AT_ASSERT(levelToSize.height == height); AT_ASSERT(levelToSize.depth == 0); // generate texture object for reading hipTextureObject_t texInput; { hipResourceDesc texRes; { memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = levelFrom; } hipTextureDesc texDesc; { memset(&texDesc, 0, sizeof(hipTextureDesc)); texDesc.normalizedCoords = 1; texDesc.filterMode = hipFilterModeLinear; texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; texDesc.readMode = hipReadModeElementType; } checkCudaErrors(hipCreateTextureObject(&texInput, &texRes, &texDesc, nullptr)); } // generate surface object for writing hipSurfaceObject_t surfOutput; { hipResourceDesc surfRes; { memset(&surfRes, 0, sizeof(hipResourceDesc)); surfRes.resType = hipResourceTypeArray; surfRes.res.array.array = levelTo; } checkCudaErrors(hipCreateSurfaceObject(&surfOutput, &surfRes)); } // run mipmap kernel dim3 block(16, 16, 1); dim3 grid( (width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1); genMipmap << <grid, block >> > ( surfOutput, texInput, width, height); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDestroySurfaceObject(surfOutput)); checkCudaErrors(hipDestroyTextureObject(texInput)); level++; } } }
afa58c64154f5bc7832ff5de83b345abef3d0ae6.cu
#include <algorithm> #include "kernel/GenMipMaps.h" #include "cuda/helper_math.h" #include "cuda/cudamemory.h" #include "cuda/cudautil.h" // NOTE // http://www.cse.uaa.alaska.edu/~ssiewert/a490dmis_code/CUDA/cuda_work/samples/2_Graphics/bindlessTexture/bindlessTexture_kernel.cu __global__ void genMipmap( cudaSurfaceObject_t mipOutput, cudaTextureObject_t mipInput, int32_t imageW, int32_t imageH) { int32_t x = blockIdx.x * blockDim.x + threadIdx.x; int32_t y = blockIdx.y * blockDim.y + threadIdx.y; float px = 1.0 / float(imageW); float py = 1.0 / float(imageH); if ((x < imageW) && (y < imageH)) { // take the average of 4 samples // we are using the normalized access to make sure non-power-of-two textures // behave well when downsized. float4 color = (tex2D<float4>(mipInput, (x + 0) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 0) * py)) + (tex2D<float4>(mipInput, (x + 1) * px, (y + 1) * py)) + (tex2D<float4>(mipInput, (x + 0) * px, (y + 1) * py)); color /= 4.0f; surf2Dwrite(color, mipOutput, x * sizeof(float4), y); } } namespace idaten { void generateMipMaps( cudaMipmappedArray_t mipmapArray, int32_t width, int32_t height, int32_t maxLevel) { int32_t level = 0; //while (width != 1 || height != 1) while (level + 1 < maxLevel) { width /= 2; height /= 2; width = std::max(1, width); height = std::max(1, height); // Copy from. cudaArray_t levelFrom; checkCudaErrors(cudaGetMipmappedArrayLevel(&levelFrom, mipmapArray, level)); // Copy to. 
cudaArray_t levelTo; checkCudaErrors(cudaGetMipmappedArrayLevel(&levelTo, mipmapArray, level + 1)); cudaExtent levelToSize; checkCudaErrors(cudaArrayGetInfo(nullptr, &levelToSize, nullptr, levelTo)); AT_ASSERT(levelToSize.width == width); AT_ASSERT(levelToSize.height == height); AT_ASSERT(levelToSize.depth == 0); // generate texture object for reading cudaTextureObject_t texInput; { cudaResourceDesc texRes; { memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = levelFrom; } cudaTextureDesc texDesc; { memset(&texDesc, 0, sizeof(cudaTextureDesc)); texDesc.normalizedCoords = 1; texDesc.filterMode = cudaFilterModeLinear; texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; texDesc.readMode = cudaReadModeElementType; } checkCudaErrors(cudaCreateTextureObject(&texInput, &texRes, &texDesc, nullptr)); } // generate surface object for writing cudaSurfaceObject_t surfOutput; { cudaResourceDesc surfRes; { memset(&surfRes, 0, sizeof(cudaResourceDesc)); surfRes.resType = cudaResourceTypeArray; surfRes.res.array.array = levelTo; } checkCudaErrors(cudaCreateSurfaceObject(&surfOutput, &surfRes)); } // run mipmap kernel dim3 block(16, 16, 1); dim3 grid( (width + block.x - 1) / block.x, (height + block.y - 1) / block.y, 1); genMipmap << <grid, block >> > ( surfOutput, texInput, width, height); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDestroySurfaceObject(surfOutput)); checkCudaErrors(cudaDestroyTextureObject(texInput)); level++; } } }
fe4c843579409c7c0495be0267560b19cb1f91fb.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/job/parallel_desc.h" #include "oneflow/core/kernel/cuda_graph_support.h" #include "oneflow/user/kernels/cublas_fused_mlp_util.cuh" #include "oneflow/core/ep/include/primitive/fill.h" #include "oneflow/core/device/nccl_util.h" #include "oneflow/core/job/eager_nccl_comm_manager.h" // CUBLAS_AUX_EPILOGUE only support in cuda11.4 or higher version, in cuda11.4 it need static link. 
#if TORCH_HIP_VERSION >= 11060 namespace oneflow { namespace { struct Comm { Comm(ncclComm_t comm) : comm(comm) {} ncclComm_t comm; }; class MatmulGradKernelState final : public user_op::OpKernelState { public: MatmulGradKernelState(user_op::KernelInitContext* ctx) : if_need_comm_(false), stream_name_(EagerNcclCommMgr::kDefaultStreamName) { OF_CUDA_CHECK(hipStreamCreate(&cuda_stream_)); OF_CUDA_CHECK(hipStreamCreate(&allreduce_stream_)); OF_CUBLAS_CHECK(cublasLtCreate(&cublas_lt_handle_)); workspace_size_ = ParseIntegerFromEnv("ONEFLOW_EP_CUDA_CUBLAS_WORKSPACE_SIZE_MB", kDefaultWorkspaceSizeMb) * 1024 * 1024; OF_CUDA_CHECK(hipMalloc(&workspace_, workspace_size_)); if (ctx->parallel_ctx().parallel_num() > 1) { parallel_conf_ = ctx->parallel_desc().parallel_conf(); } } ~MatmulGradKernelState() { OF_CUDA_CHECK(hipStreamSynchronize(cuda_stream_)); OF_CUBLAS_CHECK(cublasLtDestroy(cublas_lt_handle_)); OF_CUDA_CHECK(hipStreamDestroy(cuda_stream_)); OF_CUDA_CHECK(hipStreamSynchronize(allreduce_stream_)); OF_CUDA_CHECK(hipStreamDestroy(allreduce_stream_)); OF_CUDA_CHECK(hipFree(workspace_)); } hipStream_t grad_cuda_stream() const { return cuda_stream_; } hipStream_t allreduce_stream() const { return allreduce_stream_; } cublasLtHandle_t cublas_lt_handle() const { return cublas_lt_handle_; } size_t cublas_workspace_size() const { return workspace_size_; } void* cublas_workspace() const { return workspace_; } bool IfCommCreate() const { if (!comm_) { return false; } return true; } bool IfNeedComm() const { return if_need_comm_; } ncclComm_t comm() { return GetOrCreate().comm; } const Comm& GetOrCreate() { if (!comm_) { InitCommMgr(); } return *comm_; } void InitNeedComm(user_op::KernelInitContext* ctx) { if_need_comm_ = false; if (ctx->parallel_ctx().parallel_num() > 1) { const int64_t d_weights_size = ctx->output_size("d_weights"); if (ctx->SbpParallel4ArgNameAndIndex("d_weights", 0).has_broadcast_parallel()) { for (int i = 0; i < d_weights_size; i++) { 
CHECK(ctx->SbpParallel4ArgNameAndIndex("d_weights", i).has_broadcast_parallel()) << "All d_weight's SBP should be Broadcast. "; CHECK(ctx->SbpParallel4ArgNameAndIndex("d_biases", i).has_broadcast_parallel()) << "All d_bias's SBP should be Broadcast. "; } if (ctx->SbpParallel4ArgNameAndIndex("dy", 0).has_split_parallel()) { if_need_comm_ = true; } } } } void InitCommMgr() { std::set<std::pair<int64_t, int64_t>> device_set; const ParallelDesc parallel_desc(parallel_conf_); for (int64_t parallel_id = 0; parallel_id < parallel_desc.parallel_num(); ++parallel_id) { int64_t machine_id = CHECK_JUST(parallel_desc.MachineId4ParallelId(parallel_id)); int64_t device_id = CHECK_JUST(parallel_desc.DeviceId4ParallelId(parallel_id)); device_set.emplace(std::make_pair(machine_id, device_id)); } EagerCclCommMgr* comm_mgr = CHECK_NOTNULL(Singleton<EagerCclCommMgr>::Get()); ncclComm_t comm; comm = comm_mgr->As<EagerNcclCommMgr>()->GetCommForDeviceAndStreamName(device_set, stream_name_); comm_.reset(new Comm(comm)); } private: hipStream_t cuda_stream_{}; hipStream_t allreduce_stream_{}; cublasLtHandle_t cublas_lt_handle_{}; void* workspace_{}; size_t workspace_size_; std::string stream_name_; std::unique_ptr<Comm> comm_; bool if_need_comm_; ParallelConf parallel_conf_; }; template<typename T> class CublasFusedMLPGradKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: CublasFusedMLPGradKernel() { OF_CUDA_CHECK(hipEventCreate(&main_stream_event_)); OF_CUDA_CHECK(hipEventCreate(&async_weight_grad_event_)); OF_CUDA_CHECK(hipEventCreate(&dweight_event_)); OF_CUDA_CHECK(hipEventCreate(&allreduce_event_)); }; ~CublasFusedMLPGradKernel() override { OF_CUDA_CHECK(hipEventDestroy(main_stream_event_)); OF_CUDA_CHECK(hipEventDestroy(async_weight_grad_event_)); OF_CUDA_CHECK(hipEventDestroy(dweight_event_)); OF_CUDA_CHECK(hipEventDestroy(allreduce_event_)); }; std::shared_ptr<user_op::OpKernelCache> InitOpKernelCache( user_op::KernelCacheContext* ctx) const override 
{ return CreateCublasFusedMLPKernelCache(); } std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { std::shared_ptr<MatmulGradKernelState> kernel_state = std::make_shared<MatmulGradKernelState>(ctx); kernel_state->InitNeedComm(ctx); return kernel_state; } private: hipEvent_t main_stream_event_; hipEvent_t async_weight_grad_event_; hipEvent_t dweight_event_; hipEvent_t allreduce_event_; bool IsReadyForCapture(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache* cache) const override { auto* kernel_state = dynamic_cast<MatmulGradKernelState*>(state); if (kernel_state->IfNeedComm()) { return kernel_state->IfCommCreate(); } else { return true; } } using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache* cache) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); int64_t tmp_buf_elem_cnt = tmp_buffer->shape_view().elem_cnt(); const int64_t weight_num = ctx->input_size("weights"); user_op::Tensor* d_x = ctx->Tensor4ArgNameAndIndex("d_x", 0); const std::vector<float> alpha_list = ctx->Attr<std::vector<float>>("alpha_list"); auto* kernel_state = dynamic_cast<MatmulGradKernelState*>(state); const auto* matmul_grad_cache = CHECK_NOTNULL(dynamic_cast<const CublasFusedMLPKernelCache*>(cache)); ncclComm_t comm{}; bool if_need_comm = kernel_state->IfNeedComm(); if (if_need_comm) { comm = kernel_state->comm(); } void* dy_tmp_buf = tmp_buffer->mut_dptr(); size_t tmp_buf_offset = 0; auto* cuda_stream = ctx->stream()->As<ep::CudaStream>(); const DataType data_type = dy->data_type(); const hipblasComputeType_t cublas_compute_dtype = GetComputeType(data_type); const hipDataType cuda_data_type = GetCudaDataType(data_type); size_t 
cublas_m = 0, cublas_n = 0, cublas_k = 0; int64_t cublas_lda = 0, cublas_ldb = 0, cublas_ldc = 0; const double alpha_one = 1.0; auto sp_alpha_one = GetCublasScalarParameter(alpha_one, cublas_compute_dtype); double alpha = 1.0; auto sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); double beta = 0.0; auto sp_beta = GetCublasScalarParameter(beta, cublas_compute_dtype); cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT; // currently only support 2D matmul. DimVector weight_shape(2); DimVector hidden_shape(2); DimVector dy_shape(2); dy->shape_view().ToDimVector(&dy_shape); const void* dgrad_buf = dy->dptr(); const int64_t batch_size = dy->shape_view().At(0); const void* ones = nullptr; ep::CudaDevice* cuda_device = dynamic_cast<ep::CudaDevice*>(ctx->stream()->device()); CHECK_NOTNULL(cuda_device); ones = cuda_device->GetConstOnes(dy->data_type(), batch_size); if (ones == nullptr) { std::unique_ptr<ep::primitive::Fill> fill = ep::primitive::NewPrimitive<ep::primitive::FillFactory>(ctx->stream()->device_type(), data_type); CHECK(fill); fill->Launch(ctx->stream(), tmp_buffer->mut_dptr(), 1.0, batch_size); ones = tmp_buffer->mut_dptr(); tmp_buf_offset += GetCudaAlignedSize(batch_size * sizeof(T)); dy_tmp_buf = reinterpret_cast<void*>(tmp_buffer->mut_dptr<char>() + tmp_buf_offset); } for (int idx = weight_num - 1; idx >= 0; idx--) { const user_op::Tensor* weight = ctx->Tensor4ArgNameAndIndex("weights", idx); weight->shape_view().ToDimVector(&weight_shape); InferMatmulCublasMNK(dy_shape, weight_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); if (idx != 0) { alpha = alpha_list.at(idx - 1); sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); const user_op::Tensor* aux = ctx->Tensor4ArgNameAndIndex("cublas_aux", idx - 1); user_op::Tensor* d_bias = ctx->Tensor4ArgNameAndIndex("d_biases", idx - 1); epilogue = 
CUBLASLT_EPILOGUE_DRELU_BGRAD; SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/true, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, d_bias->mut_dptr(), aux->dptr(), cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUDA_CHECK(hipEventRecord(main_stream_event_, cuda_stream->cuda_stream())); OF_CUBLAS_CHECK(cublasLtMatmul( cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha, weight->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, dy_tmp_buf, matmul_grad_cache->cublas_c_desc, dy_tmp_buf, matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); } else { epilogue = CUBLASLT_EPILOGUE_DEFAULT; SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUDA_CHECK(hipEventRecord(main_stream_event_, cuda_stream->cuda_stream())); OF_CUBLAS_CHECK(cublasLtMatmul( cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, weight->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_x->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_x->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); } // step1: Get last layer's dbias. 
if (idx == weight_num - 1) { user_op::Tensor* d_last_bias = ctx->Tensor4ArgNameAndIndex("d_biases", weight_num - 1); DimVector ones_buf_shape(2); ones_buf_shape.at(0) = 1; ones_buf_shape.at(1) = batch_size; epilogue = CUBLASLT_EPILOGUE_DEFAULT; InferMatmulCublasMNK(ones_buf_shape, dy_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); OF_CUDA_CHECK(hipStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, dgrad_buf, matmul_grad_cache->cublas_a_desc, ones, matmul_grad_cache->cublas_b_desc, &sp_beta, d_last_bias->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_last_bias->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); } user_op::Tensor* d_weight = ctx->Tensor4ArgNameAndIndex("d_weights", idx); epilogue = CUBLASLT_EPILOGUE_DEFAULT; if (idx != 0) { const user_op::Tensor* hidden = ctx->Tensor4ArgNameAndIndex("hidden", idx - 1); // here hidden->shape_view().ToDimVector(&hidden_shape); InferMatmulCublasMNK(dy_shape, hidden_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, 
nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); if (idx != weight_num - 1) { // if idx == weight_num - 1, async_stream has wait main_stream_event_ in d_bias. OF_CUDA_CHECK(hipStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); } OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, hidden->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); OF_CUDA_CHECK(hipEventRecord(dweight_event_, kernel_state->grad_cuda_stream())); // compute dy shape dy_shape.at(1) = weight_shape.at(1); // compute dybuf dgrad_buf = dy_tmp_buf; tmp_buf_offset += GetCudaAlignedSize(dy_shape.at(0) * dy_shape.at(1) * sizeof(T)); CHECK_LE(tmp_buf_offset, tmp_buf_elem_cnt) << "Tmp buffer offset should <= Tmp buffer elem_cnt. 
"; dy_tmp_buf = reinterpret_cast<void*>(tmp_buffer->mut_dptr<char>() + tmp_buf_offset); } else { x->shape_view().ToDimVector(&hidden_shape); InferMatmulCublasMNK(dy_shape, hidden_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); OF_CUDA_CHECK(hipStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, x->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); OF_CUDA_CHECK(hipEventRecord(dweight_event_, kernel_state->grad_cuda_stream())); } if (if_need_comm) { // Do Allreduce for d_bias and d_weight. // Here we wait wgrad event, and set a ncclGroup to Allreduce d_bias and d_weight. 
OF_CUDA_CHECK(hipStreamWaitEvent(kernel_state->allreduce_stream(), dweight_event_)); OF_NCCL_CHECK(ncclGroupStart()); user_op::Tensor* allreduce_d_bias = ctx->Tensor4ArgNameAndIndex("d_biases", idx); OF_NCCL_CHECK(ncclAllReduce(allreduce_d_bias->mut_dptr(), allreduce_d_bias->mut_dptr(), allreduce_d_bias->shape_view().elem_cnt(), GetNcclDataType(allreduce_d_bias->data_type()), ncclRedOp_t::ncclSum, comm, kernel_state->allreduce_stream())); OF_NCCL_CHECK(ncclAllReduce(d_weight->mut_dptr(), d_weight->mut_dptr(), d_weight->shape_view().elem_cnt(), GetNcclDataType(d_weight->data_type()), ncclRedOp_t::ncclSum, comm, kernel_state->allreduce_stream())); OF_NCCL_CHECK(ncclGroupEnd()); if (idx == 0) { // We should sync allreduce before the kernel finish. OF_CUDA_CHECK(hipEventRecord(allreduce_event_, kernel_state->allreduce_stream())); } } } if (if_need_comm) { OF_CUDA_CHECK(hipStreamWaitEvent(cuda_stream->cuda_stream(), allreduce_event_)); } else { OF_CUDA_CHECK(hipStreamWaitEvent(cuda_stream->cuda_stream(), dweight_event_)); } }; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cublas_fused_mlp_grad") \ .SetCreateFn<CublasFusedMLPGradKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const int64_t weight_num = ctx->input_size("weights"); \ const Shape& dy_shape = ctx->InputShape("dy", 0); \ int64_t m = dy_shape.At(0); \ int64_t k = dy_shape.At(1); \ int64_t tmp_buffer_size = 0; \ tmp_buffer_size += GetCudaAlignedSize(m * sizeof(dtype)); /*For last layer's bias grad*/ \ for (int idx = weight_num - 1; idx > 0; idx--) { \ const Shape& weight_shape = ctx->InputShape("weights", idx); \ k = weight_shape.At(1); \ tmp_buffer_size += GetCudaAlignedSize(m * k * sizeof(dtype)); \ } \ return tmp_buffer_size; \ }); 
REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(float) REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(double) REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(half) REGISTER_USER_KERNEL_UNIFIED_NCCL_COMM_INIT("cublas_fused_mlp_grad"); } // namespace } // namespace oneflow #endif // TORCH_HIP_VERSION >= 11060
fe4c843579409c7c0495be0267560b19cb1f91fb.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/job/parallel_desc.h" #include "oneflow/core/kernel/cuda_graph_support.h" #include "oneflow/user/kernels/cublas_fused_mlp_util.cuh" #include "oneflow/core/ep/include/primitive/fill.h" #include "oneflow/core/device/nccl_util.h" #include "oneflow/core/job/eager_nccl_comm_manager.h" // CUBLAS_AUX_EPILOGUE only support in cuda11.4 or higher version, in cuda11.4 it need static link. #if CUDA_VERSION >= 11060 namespace oneflow { namespace { struct Comm { Comm(ncclComm_t comm) : comm(comm) {} ncclComm_t comm; }; class MatmulGradKernelState final : public user_op::OpKernelState { public: MatmulGradKernelState(user_op::KernelInitContext* ctx) : if_need_comm_(false), stream_name_(EagerNcclCommMgr::kDefaultStreamName) { OF_CUDA_CHECK(cudaStreamCreate(&cuda_stream_)); OF_CUDA_CHECK(cudaStreamCreate(&allreduce_stream_)); OF_CUBLAS_CHECK(cublasLtCreate(&cublas_lt_handle_)); workspace_size_ = ParseIntegerFromEnv("ONEFLOW_EP_CUDA_CUBLAS_WORKSPACE_SIZE_MB", kDefaultWorkspaceSizeMb) * 1024 * 1024; OF_CUDA_CHECK(cudaMalloc(&workspace_, workspace_size_)); if (ctx->parallel_ctx().parallel_num() > 1) { parallel_conf_ = ctx->parallel_desc().parallel_conf(); } } ~MatmulGradKernelState() { OF_CUDA_CHECK(cudaStreamSynchronize(cuda_stream_)); OF_CUBLAS_CHECK(cublasLtDestroy(cublas_lt_handle_)); OF_CUDA_CHECK(cudaStreamDestroy(cuda_stream_)); OF_CUDA_CHECK(cudaStreamSynchronize(allreduce_stream_)); 
OF_CUDA_CHECK(cudaStreamDestroy(allreduce_stream_)); OF_CUDA_CHECK(cudaFree(workspace_)); } cudaStream_t grad_cuda_stream() const { return cuda_stream_; } cudaStream_t allreduce_stream() const { return allreduce_stream_; } cublasLtHandle_t cublas_lt_handle() const { return cublas_lt_handle_; } size_t cublas_workspace_size() const { return workspace_size_; } void* cublas_workspace() const { return workspace_; } bool IfCommCreate() const { if (!comm_) { return false; } return true; } bool IfNeedComm() const { return if_need_comm_; } ncclComm_t comm() { return GetOrCreate().comm; } const Comm& GetOrCreate() { if (!comm_) { InitCommMgr(); } return *comm_; } void InitNeedComm(user_op::KernelInitContext* ctx) { if_need_comm_ = false; if (ctx->parallel_ctx().parallel_num() > 1) { const int64_t d_weights_size = ctx->output_size("d_weights"); if (ctx->SbpParallel4ArgNameAndIndex("d_weights", 0).has_broadcast_parallel()) { for (int i = 0; i < d_weights_size; i++) { CHECK(ctx->SbpParallel4ArgNameAndIndex("d_weights", i).has_broadcast_parallel()) << "All d_weight's SBP should be Broadcast. "; CHECK(ctx->SbpParallel4ArgNameAndIndex("d_biases", i).has_broadcast_parallel()) << "All d_bias's SBP should be Broadcast. 
"; } if (ctx->SbpParallel4ArgNameAndIndex("dy", 0).has_split_parallel()) { if_need_comm_ = true; } } } } void InitCommMgr() { std::set<std::pair<int64_t, int64_t>> device_set; const ParallelDesc parallel_desc(parallel_conf_); for (int64_t parallel_id = 0; parallel_id < parallel_desc.parallel_num(); ++parallel_id) { int64_t machine_id = CHECK_JUST(parallel_desc.MachineId4ParallelId(parallel_id)); int64_t device_id = CHECK_JUST(parallel_desc.DeviceId4ParallelId(parallel_id)); device_set.emplace(std::make_pair(machine_id, device_id)); } EagerCclCommMgr* comm_mgr = CHECK_NOTNULL(Singleton<EagerCclCommMgr>::Get()); ncclComm_t comm; comm = comm_mgr->As<EagerNcclCommMgr>()->GetCommForDeviceAndStreamName(device_set, stream_name_); comm_.reset(new Comm(comm)); } private: cudaStream_t cuda_stream_{}; cudaStream_t allreduce_stream_{}; cublasLtHandle_t cublas_lt_handle_{}; void* workspace_{}; size_t workspace_size_; std::string stream_name_; std::unique_ptr<Comm> comm_; bool if_need_comm_; ParallelConf parallel_conf_; }; template<typename T> class CublasFusedMLPGradKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport { public: CublasFusedMLPGradKernel() { OF_CUDA_CHECK(cudaEventCreate(&main_stream_event_)); OF_CUDA_CHECK(cudaEventCreate(&async_weight_grad_event_)); OF_CUDA_CHECK(cudaEventCreate(&dweight_event_)); OF_CUDA_CHECK(cudaEventCreate(&allreduce_event_)); }; ~CublasFusedMLPGradKernel() override { OF_CUDA_CHECK(cudaEventDestroy(main_stream_event_)); OF_CUDA_CHECK(cudaEventDestroy(async_weight_grad_event_)); OF_CUDA_CHECK(cudaEventDestroy(dweight_event_)); OF_CUDA_CHECK(cudaEventDestroy(allreduce_event_)); }; std::shared_ptr<user_op::OpKernelCache> InitOpKernelCache( user_op::KernelCacheContext* ctx) const override { return CreateCublasFusedMLPKernelCache(); } std::shared_ptr<user_op::OpKernelState> CreateOpKernelState( user_op::KernelInitContext* ctx) const override { std::shared_ptr<MatmulGradKernelState> kernel_state = 
std::make_shared<MatmulGradKernelState>(ctx); kernel_state->InitNeedComm(ctx); return kernel_state; } private: cudaEvent_t main_stream_event_; cudaEvent_t async_weight_grad_event_; cudaEvent_t dweight_event_; cudaEvent_t allreduce_event_; bool IsReadyForCapture(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache* cache) const override { auto* kernel_state = dynamic_cast<MatmulGradKernelState*>(state); if (kernel_state->IfNeedComm()) { return kernel_state->IfCommCreate(); } else { return true; } } using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state, const user_op::OpKernelCache* cache) const override { const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); int64_t tmp_buf_elem_cnt = tmp_buffer->shape_view().elem_cnt(); const int64_t weight_num = ctx->input_size("weights"); user_op::Tensor* d_x = ctx->Tensor4ArgNameAndIndex("d_x", 0); const std::vector<float> alpha_list = ctx->Attr<std::vector<float>>("alpha_list"); auto* kernel_state = dynamic_cast<MatmulGradKernelState*>(state); const auto* matmul_grad_cache = CHECK_NOTNULL(dynamic_cast<const CublasFusedMLPKernelCache*>(cache)); ncclComm_t comm{}; bool if_need_comm = kernel_state->IfNeedComm(); if (if_need_comm) { comm = kernel_state->comm(); } void* dy_tmp_buf = tmp_buffer->mut_dptr(); size_t tmp_buf_offset = 0; auto* cuda_stream = ctx->stream()->As<ep::CudaStream>(); const DataType data_type = dy->data_type(); const cublasComputeType_t cublas_compute_dtype = GetComputeType(data_type); const cudaDataType_t cuda_data_type = GetCudaDataType(data_type); size_t cublas_m = 0, cublas_n = 0, cublas_k = 0; int64_t cublas_lda = 0, cublas_ldb = 0, cublas_ldc = 0; const double alpha_one = 1.0; auto sp_alpha_one = GetCublasScalarParameter(alpha_one, cublas_compute_dtype); 
double alpha = 1.0; auto sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); double beta = 0.0; auto sp_beta = GetCublasScalarParameter(beta, cublas_compute_dtype); cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_DEFAULT; // currently only support 2D matmul. DimVector weight_shape(2); DimVector hidden_shape(2); DimVector dy_shape(2); dy->shape_view().ToDimVector(&dy_shape); const void* dgrad_buf = dy->dptr(); const int64_t batch_size = dy->shape_view().At(0); const void* ones = nullptr; ep::CudaDevice* cuda_device = dynamic_cast<ep::CudaDevice*>(ctx->stream()->device()); CHECK_NOTNULL(cuda_device); ones = cuda_device->GetConstOnes(dy->data_type(), batch_size); if (ones == nullptr) { std::unique_ptr<ep::primitive::Fill> fill = ep::primitive::NewPrimitive<ep::primitive::FillFactory>(ctx->stream()->device_type(), data_type); CHECK(fill); fill->Launch(ctx->stream(), tmp_buffer->mut_dptr(), 1.0, batch_size); ones = tmp_buffer->mut_dptr(); tmp_buf_offset += GetCudaAlignedSize(batch_size * sizeof(T)); dy_tmp_buf = reinterpret_cast<void*>(tmp_buffer->mut_dptr<char>() + tmp_buf_offset); } for (int idx = weight_num - 1; idx >= 0; idx--) { const user_op::Tensor* weight = ctx->Tensor4ArgNameAndIndex("weights", idx); weight->shape_view().ToDimVector(&weight_shape); InferMatmulCublasMNK(dy_shape, weight_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); if (idx != 0) { alpha = alpha_list.at(idx - 1); sp_alpha = GetCublasScalarParameter(alpha, cublas_compute_dtype); const user_op::Tensor* aux = ctx->Tensor4ArgNameAndIndex("cublas_aux", idx - 1); user_op::Tensor* d_bias = ctx->Tensor4ArgNameAndIndex("d_biases", idx - 1); epilogue = CUBLASLT_EPILOGUE_DRELU_BGRAD; SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/true, /*transpose_a=*/ep::primitive::BlasTransposeType::N, 
/*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, d_bias->mut_dptr(), aux->dptr(), cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUDA_CHECK(cudaEventRecord(main_stream_event_, cuda_stream->cuda_stream())); OF_CUBLAS_CHECK(cublasLtMatmul( cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha, weight->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, dy_tmp_buf, matmul_grad_cache->cublas_c_desc, dy_tmp_buf, matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); } else { epilogue = CUBLASLT_EPILOGUE_DEFAULT; SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); /* a = dy, b = weight cublas_a=weight, cublas_b=dy */ OF_CUDA_CHECK(cudaEventRecord(main_stream_event_, cuda_stream->cuda_stream())); OF_CUBLAS_CHECK(cublasLtMatmul( cuda_stream->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, weight->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_x->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_x->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, cuda_stream->cublas_workspace(), cuda_stream->cublas_workspace_size(), cuda_stream->cuda_stream())); } // step1: Get last layer's dbias. 
if (idx == weight_num - 1) { user_op::Tensor* d_last_bias = ctx->Tensor4ArgNameAndIndex("d_biases", weight_num - 1); DimVector ones_buf_shape(2); ones_buf_shape.at(0) = 1; ones_buf_shape.at(1) = batch_size; epilogue = CUBLASLT_EPILOGUE_DEFAULT; InferMatmulCublasMNK(ones_buf_shape, dy_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::N, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); OF_CUDA_CHECK(cudaStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, dgrad_buf, matmul_grad_cache->cublas_a_desc, ones, matmul_grad_cache->cublas_b_desc, &sp_beta, d_last_bias->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_last_bias->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); } user_op::Tensor* d_weight = ctx->Tensor4ArgNameAndIndex("d_weights", idx); epilogue = CUBLASLT_EPILOGUE_DEFAULT; if (idx != 0) { const user_op::Tensor* hidden = ctx->Tensor4ArgNameAndIndex("hidden", idx - 1); // here hidden->shape_view().ToDimVector(&hidden_shape); InferMatmulCublasMNK(dy_shape, hidden_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, 
nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); if (idx != weight_num - 1) { // if idx == weight_num - 1, async_stream has wait main_stream_event_ in d_bias. OF_CUDA_CHECK(cudaStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); } OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, hidden->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); OF_CUDA_CHECK(cudaEventRecord(dweight_event_, kernel_state->grad_cuda_stream())); // compute dy shape dy_shape.at(1) = weight_shape.at(1); // compute dybuf dgrad_buf = dy_tmp_buf; tmp_buf_offset += GetCudaAlignedSize(dy_shape.at(0) * dy_shape.at(1) * sizeof(T)); CHECK_LE(tmp_buf_offset, tmp_buf_elem_cnt) << "Tmp buffer offset should <= Tmp buffer elem_cnt. 
"; dy_tmp_buf = reinterpret_cast<void*>(tmp_buffer->mut_dptr<char>() + tmp_buf_offset); } else { x->shape_view().ToDimVector(&hidden_shape); InferMatmulCublasMNK(dy_shape, hidden_shape, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, &cublas_m, &cublas_n, &cublas_k, &cublas_lda, &cublas_ldb, &cublas_ldc); SetCublasAttr(matmul_grad_cache, cublas_compute_dtype, cuda_data_type, /*need_aux=*/false, /*transpose_a=*/ep::primitive::BlasTransposeType::T, /*transpose_b=*/ep::primitive::BlasTransposeType::N, epilogue, nullptr, nullptr, cublas_m, cublas_n, cublas_k, cublas_lda, cublas_ldb, cublas_ldc); OF_CUDA_CHECK(cudaStreamWaitEvent(kernel_state->grad_cuda_stream(), main_stream_event_)); OF_CUBLAS_CHECK(cublasLtMatmul( kernel_state->cublas_lt_handle(), matmul_grad_cache->operation_desc, &sp_alpha_one, x->dptr(), matmul_grad_cache->cublas_a_desc, dgrad_buf, matmul_grad_cache->cublas_b_desc, &sp_beta, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, d_weight->mut_dptr(), matmul_grad_cache->cublas_c_desc, nullptr, kernel_state->cublas_workspace(), kernel_state->cublas_workspace_size(), kernel_state->grad_cuda_stream())); OF_CUDA_CHECK(cudaEventRecord(dweight_event_, kernel_state->grad_cuda_stream())); } if (if_need_comm) { // Do Allreduce for d_bias and d_weight. // Here we wait wgrad event, and set a ncclGroup to Allreduce d_bias and d_weight. 
OF_CUDA_CHECK(cudaStreamWaitEvent(kernel_state->allreduce_stream(), dweight_event_)); OF_NCCL_CHECK(ncclGroupStart()); user_op::Tensor* allreduce_d_bias = ctx->Tensor4ArgNameAndIndex("d_biases", idx); OF_NCCL_CHECK(ncclAllReduce(allreduce_d_bias->mut_dptr(), allreduce_d_bias->mut_dptr(), allreduce_d_bias->shape_view().elem_cnt(), GetNcclDataType(allreduce_d_bias->data_type()), ncclRedOp_t::ncclSum, comm, kernel_state->allreduce_stream())); OF_NCCL_CHECK(ncclAllReduce(d_weight->mut_dptr(), d_weight->mut_dptr(), d_weight->shape_view().elem_cnt(), GetNcclDataType(d_weight->data_type()), ncclRedOp_t::ncclSum, comm, kernel_state->allreduce_stream())); OF_NCCL_CHECK(ncclGroupEnd()); if (idx == 0) { // We should sync allreduce before the kernel finish. OF_CUDA_CHECK(cudaEventRecord(allreduce_event_, kernel_state->allreduce_stream())); } } } if (if_need_comm) { OF_CUDA_CHECK(cudaStreamWaitEvent(cuda_stream->cuda_stream(), allreduce_event_)); } else { OF_CUDA_CHECK(cudaStreamWaitEvent(cuda_stream->cuda_stream(), dweight_event_)); } }; bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(dtype) \ REGISTER_USER_KERNEL("cublas_fused_mlp_grad") \ .SetCreateFn<CublasFusedMLPGradKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ && (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const int64_t weight_num = ctx->input_size("weights"); \ const Shape& dy_shape = ctx->InputShape("dy", 0); \ int64_t m = dy_shape.At(0); \ int64_t k = dy_shape.At(1); \ int64_t tmp_buffer_size = 0; \ tmp_buffer_size += GetCudaAlignedSize(m * sizeof(dtype)); /*For last layer's bias grad*/ \ for (int idx = weight_num - 1; idx > 0; idx--) { \ const Shape& weight_shape = ctx->InputShape("weights", idx); \ k = weight_shape.At(1); \ tmp_buffer_size += GetCudaAlignedSize(m * k * sizeof(dtype)); \ } \ return tmp_buffer_size; \ }); 
// Instantiate the fused-MLP backward kernel for every supported element type.
REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(float)
REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(double)
REGISTER_CUBLAS_FUSED_MLP_GRAD_KERNEL(half)
// Register this op for unified NCCL communicator initialization so the
// allreduce path in the kernel can obtain a communicator at init time.
REGISTER_USER_KERNEL_UNIFIED_NCCL_COMM_INIT("cublas_fused_mlp_grad");
}  // namespace
}  // namespace oneflow
#endif  // CUDA_VERSION >= 11060
imshrunkG.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <malloc.h>
#include "ImageStuff.h"

#define REPS 1
#define MAXTHREADS 128
#define N 32

long NumThreads;                 // Total number of threads working in parallel
int ThParam[MAXTHREADS];         // Thread parameters ...
pthread_t ThHandle[MAXTHREADS];  // Thread handles
pthread_attr_t ThAttr;           // Pthread attrributes

unsigned char** TheImage;   // This is the main image (host; one malloc per row)
unsigned char** CopyImage;  // This is the shrunk copy image (host)
struct ImgProp ip;

// Abort with a readable message when a HIP runtime call fails.
static void HipCheck(hipError_t err, const char* what)
{
    if (err != hipSuccess) {
        fprintf(stderr, "HIP error in %s: %s\n", what, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * Shrink kernel: picks every xshrink-th pixel / yshrink-th row of the input.
 *
 * Image data lives in FLAT device buffers (the original double-pointer layout
 * required dereferencing device pointers on the host, which is invalid).
 * inPitch/outPitch are the row strides in bytes.  Dimensions are passed as
 * parameters because device code cannot read the host global `ip`.
 *
 * Each thread handles one output pixel at a time via a grid-stride loop, so
 * any launch configuration produces the full (and non-redundant) result.
 */
__global__ void shrunk(int xshrink, int yshrink,
                       const unsigned char* ImageGPU, unsigned char* CopyImage_GPU,
                       int Vpixels, int Hpixels, int inPitch, int outPitch)
{
    int outRows = Vpixels / yshrink;            // output height in pixels
    int outCols = (Hpixels * 3 / xshrink) / 3;  // output width in pixels
    int total   = outRows * outCols;
    int stride  = gridDim.x * blockDim.x;

    for (int p = blockIdx.x * blockDim.x + threadIdx.x; p < total; p += stride) {
        int NewRow = p / outCols;
        int NewCol = (p % outCols) * 3;              // byte offset in output row
        int row = NewRow * yshrink;                  // sampled input row
        int col = (p % outCols) * 3 * xshrink;       // sampled input byte offset (RGB triple)
        CopyImage_GPU[NewRow * outPitch + NewCol]     = ImageGPU[row * inPitch + col];
        CopyImage_GPU[NewRow * outPitch + NewCol + 1] = ImageGPU[row * inPitch + col + 1];
        CopyImage_GPU[NewRow * outPitch + NewCol + 2] = ImageGPU[row * inPitch + col + 2];
    }
}

int main(int argc, char** argv)
{
    int a, i;
    struct timeval t;
    double StartTime, EndTime;
    double TimeElapsed;
    int xshrink, yshrink;

    // GPU launch configuration
    int threadsPerBlock = 16;
    int numBlocks = N / threadsPerBlock;
    unsigned char* ImageGPU;       // flat device input buffer
    unsigned char* CopyImage_GPU;  // flat device output buffer

    NumThreads = 8;

    // Validate the command line BEFORE reading argv[3]/argv[4].
    if (argc != 5) {
        printf("\nUsage: inputBMP outputBMP xshrink yshrink \n\n");
        printf("Nothing executed ...\nExiting ...\n\n");
        exit(EXIT_FAILURE);
    }
    xshrink = atoi(argv[3]);
    yshrink = atoi(argv[4]);
    if (xshrink < 1 || yshrink < 1) {
        printf("xshrink and yshrink must be >= 1\n");
        exit(EXIT_FAILURE);
    }

    TheImage = ReadBMP(argv[1]);
    printf("\nVpixels: %i Hpixels: %i \n", ip.Vpixels, ip.Hbytes);
    printf("yshrink: %i xshrink: %i \n", (ip.Vpixels / yshrink), ((ip.Hbytes / xshrink)));

    int inRows   = ip.Vpixels;             // remembered: ip is shrunk later, rows must still be freed
    int outRows  = ip.Vpixels / yshrink;
    int outPitch = ip.Hbytes / xshrink;    // bytes per output row

    // Host output image: one malloc per row, as WriteBMP expects.
    CopyImage = (unsigned char**)malloc(outRows * sizeof(unsigned char*));
    for (i = 0; i < outRows; i++) {
        CopyImage[i] = (unsigned char*)malloc(outPitch * sizeof(unsigned char));
    }

    // Flat device buffers; host rows are separate mallocs, so copy row by row.
    HipCheck(hipMalloc((void**)&ImageGPU, (size_t)ip.Vpixels * ip.Hbytes), "hipMalloc ImageGPU");
    HipCheck(hipMalloc((void**)&CopyImage_GPU, (size_t)outRows * outPitch), "hipMalloc CopyImage_GPU");
    for (i = 0; i < ip.Vpixels; i++) {
        HipCheck(hipMemcpy(ImageGPU + (size_t)i * ip.Hbytes, TheImage[i],
                           ip.Hbytes, hipMemcpyHostToDevice), "hipMemcpy row H2D");
    }

    gettimeofday(&t, NULL);
    StartTime = (double)t.tv_sec * 1000000.0 + ((double)t.tv_usec);

    for (a = 0; a < REPS; a++) {
        hipLaunchKernelGGL(shrunk, dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
                           xshrink, yshrink, ImageGPU, CopyImage_GPU,
                           ip.Vpixels, ip.Hpixels, ip.Hbytes, outPitch);
    }
    // Kernel launches are asynchronous: wait before stopping the timer and
    // before reading results; also surfaces any launch/execution error.
    HipCheck(hipGetLastError(), "kernel launch");
    HipCheck(hipDeviceSynchronize(), "kernel execution");

    gettimeofday(&t, NULL);
    EndTime = (double)t.tv_sec * 1000000.0 + ((double)t.tv_usec);
    TimeElapsed = (EndTime - StartTime) / 1000.00;
    TimeElapsed /= (double)REPS;

    // merge with header and write to file
    ip.Hbytes /= xshrink;
    ip.Vpixels /= yshrink;
    ip.Hpixels /= xshrink;

    for (i = 0; i < outRows; i++) {
        HipCheck(hipMemcpy(CopyImage[i], CopyImage_GPU + (size_t)i * outPitch,
                           outPitch, hipMemcpyDeviceToHost), "hipMemcpy row D2H");
    }
    WriteBMP(CopyImage, argv[2]);

    // free() the allocated areas for the images (all input rows, not the
    // already-divided ip.Vpixels count, which would leak most of TheImage).
    for (i = 0; i < inRows; i++) { free(TheImage[i]); }
    for (i = 0; i < outRows; i++) { free(CopyImage[i]); }
    free(TheImage);
    free(CopyImage);
    hipFree(ImageGPU);
    hipFree(CopyImage_GPU);

    printf("\n\nExecution time:%10.4f ms ", TimeElapsed);
    if (NumThreads >= 1)
        printf("(%10.4f Thread-ms) ", TimeElapsed * (double)NumThreads);
    printf(" (%6.3f ns/pixel)\n",
           1000000 * TimeElapsed / (double)(ip.Hpixels * ip.Vpixels));

    return (EXIT_SUCCESS);
}
imshrunkG.cu
#include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <malloc.h>
#include "ImageStuff.h"

#define REPS 1
#define MAXTHREADS 128
#define N 32

long NumThreads;                 // Total number of threads working in parallel
int ThParam[MAXTHREADS];         // Thread parameters ...
pthread_t ThHandle[MAXTHREADS];  // Thread handles
pthread_attr_t ThAttr;           // Pthread attrributes

unsigned char** TheImage;   // This is the main image (host; one malloc per row)
unsigned char** CopyImage;  // This is the shrunk copy image (host)
struct ImgProp ip;

// Abort with a readable message when a CUDA runtime call fails.
static void CudaCheck(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error in %s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * Shrink kernel: picks every xshrink-th pixel / yshrink-th row of the input.
 *
 * Image data lives in FLAT device buffers (the original double-pointer layout
 * required dereferencing device pointers on the host, which is invalid).
 * inPitch/outPitch are the row strides in bytes.  Dimensions are passed as
 * parameters because device code cannot read the host global `ip`.
 *
 * Each thread handles one output pixel at a time via a grid-stride loop, so
 * any launch configuration produces the full (and non-redundant) result.
 */
__global__ void shrunk(int xshrink, int yshrink,
                       const unsigned char* ImageGPU, unsigned char* CopyImage_GPU,
                       int Vpixels, int Hpixels, int inPitch, int outPitch)
{
    int outRows = Vpixels / yshrink;            // output height in pixels
    int outCols = (Hpixels * 3 / xshrink) / 3;  // output width in pixels
    int total   = outRows * outCols;
    int stride  = gridDim.x * blockDim.x;

    for (int p = blockIdx.x * blockDim.x + threadIdx.x; p < total; p += stride) {
        int NewRow = p / outCols;
        int NewCol = (p % outCols) * 3;              // byte offset in output row
        int row = NewRow * yshrink;                  // sampled input row
        int col = (p % outCols) * 3 * xshrink;       // sampled input byte offset (RGB triple)
        CopyImage_GPU[NewRow * outPitch + NewCol]     = ImageGPU[row * inPitch + col];
        CopyImage_GPU[NewRow * outPitch + NewCol + 1] = ImageGPU[row * inPitch + col + 1];
        CopyImage_GPU[NewRow * outPitch + NewCol + 2] = ImageGPU[row * inPitch + col + 2];
    }
}

int main(int argc, char** argv)
{
    int a, i;
    struct timeval t;
    double StartTime, EndTime;
    double TimeElapsed;
    int xshrink, yshrink;

    // GPU launch configuration
    int threadsPerBlock = 16;
    int numBlocks = N / threadsPerBlock;
    unsigned char* ImageGPU;       // flat device input buffer
    unsigned char* CopyImage_GPU;  // flat device output buffer

    NumThreads = 8;

    // Validate the command line BEFORE reading argv[3]/argv[4].
    if (argc != 5) {
        printf("\nUsage: inputBMP outputBMP xshrink yshrink \n\n");
        printf("Nothing executed ...\nExiting ...\n\n");
        exit(EXIT_FAILURE);
    }
    xshrink = atoi(argv[3]);
    yshrink = atoi(argv[4]);
    if (xshrink < 1 || yshrink < 1) {
        printf("xshrink and yshrink must be >= 1\n");
        exit(EXIT_FAILURE);
    }

    TheImage = ReadBMP(argv[1]);
    printf("\nVpixels: %i Hpixels: %i \n", ip.Vpixels, ip.Hbytes);
    printf("yshrink: %i xshrink: %i \n", (ip.Vpixels / yshrink), ((ip.Hbytes / xshrink)));

    int inRows   = ip.Vpixels;             // remembered: ip is shrunk later, rows must still be freed
    int outRows  = ip.Vpixels / yshrink;
    int outPitch = ip.Hbytes / xshrink;    // bytes per output row

    // Host output image: one malloc per row, as WriteBMP expects.
    CopyImage = (unsigned char**)malloc(outRows * sizeof(unsigned char*));
    for (i = 0; i < outRows; i++) {
        CopyImage[i] = (unsigned char*)malloc(outPitch * sizeof(unsigned char));
    }

    // Flat device buffers; host rows are separate mallocs, so copy row by row.
    CudaCheck(cudaMalloc((void**)&ImageGPU, (size_t)ip.Vpixels * ip.Hbytes), "cudaMalloc ImageGPU");
    CudaCheck(cudaMalloc((void**)&CopyImage_GPU, (size_t)outRows * outPitch), "cudaMalloc CopyImage_GPU");
    for (i = 0; i < ip.Vpixels; i++) {
        CudaCheck(cudaMemcpy(ImageGPU + (size_t)i * ip.Hbytes, TheImage[i],
                             ip.Hbytes, cudaMemcpyHostToDevice), "cudaMemcpy row H2D");
    }

    gettimeofday(&t, NULL);
    StartTime = (double)t.tv_sec * 1000000.0 + ((double)t.tv_usec);

    for (a = 0; a < REPS; a++) {
        shrunk<<<numBlocks, threadsPerBlock>>>(xshrink, yshrink, ImageGPU, CopyImage_GPU,
                                               ip.Vpixels, ip.Hpixels, ip.Hbytes, outPitch);
    }
    // Kernel launches are asynchronous: wait before stopping the timer and
    // before reading results; also surfaces any launch/execution error.
    CudaCheck(cudaGetLastError(), "kernel launch");
    CudaCheck(cudaDeviceSynchronize(), "kernel execution");

    gettimeofday(&t, NULL);
    EndTime = (double)t.tv_sec * 1000000.0 + ((double)t.tv_usec);
    TimeElapsed = (EndTime - StartTime) / 1000.00;
    TimeElapsed /= (double)REPS;

    // merge with header and write to file
    ip.Hbytes /= xshrink;
    ip.Vpixels /= yshrink;
    ip.Hpixels /= xshrink;

    for (i = 0; i < outRows; i++) {
        CudaCheck(cudaMemcpy(CopyImage[i], CopyImage_GPU + (size_t)i * outPitch,
                             outPitch, cudaMemcpyDeviceToHost), "cudaMemcpy row D2H");
    }
    WriteBMP(CopyImage, argv[2]);

    // free() the allocated areas for the images (all input rows, not the
    // already-divided ip.Vpixels count, which would leak most of TheImage).
    for (i = 0; i < inRows; i++) { free(TheImage[i]); }
    for (i = 0; i < outRows; i++) { free(CopyImage[i]); }
    free(TheImage);
    free(CopyImage);
    cudaFree(ImageGPU);
    cudaFree(CopyImage_GPU);

    printf("\n\nExecution time:%10.4f ms ", TimeElapsed);
    if (NumThreads >= 1)
        printf("(%10.4f Thread-ms) ", TimeElapsed * (double)NumThreads);
    printf(" (%6.3f ns/pixel)\n",
           1000000 * TimeElapsed / (double)(ip.Hpixels * ip.Vpixels));

    return (EXIT_SUCCESS);
}
011cf2fb0d10cd3bcf6f90aff83eaf3c036a674d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Netherlands eScience Center * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This program benchmarks four different implementations for * overlapping CPU-GPU communication and GPU computation of a * matrix multiplication kernel. * * The kernel is assumed to be tuned to each device by selecting * the best performing combination of thread block dimensions * and tiling factors in X and Y. In this implementation tiling * in X increases the amount of work per thread block and tiling * in Y increases the amount of work per thread. 
* * @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> * */ #include <stdio.h> #include <stdlib.h> #define WIDTH 4096 #define HEIGHT 4096 //Select best kernel configuration for your device //Tesla K20 //#define BLOCK_X 32 //#define BLOCK_Y 8 //#define TILE_Y 4 //#define TILE_X 8 //GTX 480 #define BLOCK_X 32 #define BLOCK_Y 8 #define TILE_Y 4 #define TILE_X 8 //GTX Titan //#define BLOCK_X 64 //#define BLOCK_Y 8 //#define TILE_Y 8 //#define TILE_X 2 #define NSTREAMS (WIDTH/BLOCK_X) #define ITERATIONS 5 //for naive #define BLOCK_SIZE 32 #define CUDA_CHECK_ERROR(errorMessage) do { \ hipError_t err = hipGetLastError(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ err = hipDeviceSynchronize(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } } while (0) /* "" */ extern "C" { void matmul (float *res, float *mat, float *vec); void matmul_explicit (float *res, float *mat, float *vec); void matmul_implicit (float *C, float *A, float *B); void matmul_streams (float *C, float *A, float *B); void matmul_hybrid (float *C, float *A, float *B); void matmul_naive (float *C, float *A, float *B); void start_timer (); void stop_timer (float *); int compare (float *a, float *b, int N); __global__ void matmul_kernel_shared (float *C, float *A, float *B); __global__ void matmul_kernel_opt (float *C, float *A, float *B); } int nStreams = -1; hipStream_t stream[NSTREAMS]; hipEvent_t event_htod[NSTREAMS]; float *h_A; float *h_B; float *h_C; float *h_Cref; float *d_A; float *d_B; float *d_C; int main () { hipError_t err; hipSetDeviceFlags (hipDeviceMapHost); hipSetDevice (0); hipDeviceSetCacheConfig (hipFuncCachePreferShared); hipDeviceSetSharedMemConfig (hipSharedMemBankSizeFourByte); //setup streams for (int k 
= 0; k < NSTREAMS; k++) { err = hipStreamCreate (&stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipStreamCreate: %s\n", hipGetErrorString (err)); } err = hipEventCreate (&event_htod[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipEventCreate htod: %s\n", hipGetErrorString (err)); } } //setup memory err = hipHostMalloc ((void **) &h_A, WIDTH * HEIGHT * sizeof (float), hipHostMallocMapped); if (err != hipSuccess) { fprintf (stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString (err)); } err = hipHostMalloc ((void **) &h_B, WIDTH * HEIGHT * sizeof (float), hipHostMallocMapped); if (err != hipSuccess) { fprintf (stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString (err)); } err = hipHostMalloc ((void **) &h_C, WIDTH * HEIGHT * sizeof (float), hipHostMallocMapped); if (err != hipSuccess) { fprintf (stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString (err)); } err = hipHostMalloc ((void **) &h_Cref, WIDTH * HEIGHT * sizeof (float), hipHostMallocMapped); if (err != hipSuccess) { fprintf (stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString (err)); } for (int y = 0; y < HEIGHT; y++) { for (int x = 0; x < WIDTH; x++) { int r = rand (); h_A[y * (WIDTH) + x] = 0.000001 + (r % 999) / 1000.0; r = rand (); h_B[y * (WIDTH) + x] = 0.000001 + (r % 500) / 5000.0; } } //error checking hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After setup"); //create reference answer for correctness checks memset (h_Cref, 0, WIDTH * HEIGHT * sizeof (float)); memset (h_C, 0, WIDTH * HEIGHT * sizeof (float)); matmul_naive (h_Cref, h_A, h_B); //run four different implementations for (int k = 0; k < ITERATIONS; k++) { matmul_explicit (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for (int k = 0; k < ITERATIONS; k++) { matmul_implicit (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for (int k = 0; k < ITERATIONS; k++) { matmul_streams (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for (int k = 0; k < ITERATIONS; k++) { 
matmul_hybrid (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); return 0; } //reference implementation, not called by this program void matmul (float *C, float *A, float *B) { int x, y, k; float sum = 0.0f; for (y = 0; y < HEIGHT; y++) { for (x = 0; x < WIDTH; x++) { sum = 0.0f; for (k = 0; k < WIDTH; k++) { sum += A[y * WIDTH + k] * B[k * WIDTH + x]; } C[y * WIDTH + x] = sum; } } } /* * Naive CUDA kernel for matrix multiplication * * not called in this program, included for completeness and clarity. */ __global__ void matmul_kernel (float *C, float *A, float *B) { int x = blockIdx.x * BLOCK_X + threadIdx.x; int y = blockIdx.y * BLOCK_Y + threadIdx.y; int k; float sum = 0.0f; if ((x < WIDTH) && (y < HEIGHT)) { for (k = 0; k < WIDTH; k++) { sum += A[y * WIDTH + k] * B[k * WIDTH + x]; } C[y * WIDTH + x] = sum; } } /* * Slightly less naive CUDA kernel for matrix multiplication * * This implementation is used to compare the results of the produced * by the optimized and different schemes for overlapping communication * and computation. The main reason to use another kernel to compare * results is that a naive CPU version would take forever. * * In this kernel a thread block uses a tile of shared memory to * cooperatively load and store the values required for each computation * step. 
* */ __global__ void matmul_kernel_shared (float *C, float *A, float *B) { __shared__ float sA[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float sB[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = blockIdx.y * BLOCK_SIZE + threadIdx.y; int k, kb; float sum = 0.0f; if ((x < WIDTH) && (y < HEIGHT)) { for (k = 0; k < WIDTH; k += BLOCK_SIZE) { __syncthreads (); sA[ty][tx] = A[y * WIDTH + k + tx]; sB[ty][tx] = B[(k + ty) * WIDTH + x]; __syncthreads (); for (kb = 0; kb < BLOCK_SIZE; kb++) { sum += sA[ty][kb] * sB[kb][tx]; } } C[y * WIDTH + x] = sum; } } /* * Optimized CUDA kernel for matrix multiplication * * This kernel is optimized and tuned according to the directions given * in: "Better performance at lower occupancy" by V. Volkov, * GPU Technology Conference, GTC 2010. * * The thread block dimensions as well as tiling factors are tuned to- * wards each GPU used as part of our evaluation. * */ __global__ void matmul_kernel_opt (float *C, float *A, float *B) { __shared__ float sA[BLOCK_X][BLOCK_X]; __shared__ float sB[BLOCK_X][BLOCK_X * TILE_X]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * BLOCK_X * TILE_X + threadIdx.x; int y = blockIdx.y * BLOCK_Y * TILE_Y + threadIdx.y; int k, kb; #if(TILE_X == 8) #if(TILE_Y == 4) float sum[TILE_X][TILE_Y] = { {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0} }; #endif #elif(TILE_X == 2) #if(TILE_Y == 8) float sum[TILE_X][TILE_Y] = { {0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0} }; #endif #endif for (k = 0; k < WIDTH; k += BLOCK_X) { __syncthreads (); #pragma unroll for (int i = 0; i < TILE_Y; i++) { sA[ty + BLOCK_Y * i][tx] = A[y * WIDTH + BLOCK_Y * i * WIDTH + k + tx]; #pragma unroll for (int j = 0; j < TILE_X; j++) { sB[ty + BLOCK_Y * i][tx + j * BLOCK_X] = B[(k + ty + BLOCK_Y * i) * WIDTH + x + j * BLOCK_X]; } } __syncthreads (); //compute #pragma unroll for (kb = 0; kb < 
BLOCK_X; kb++) { #pragma unroll for (int i = 0; i < TILE_Y; i++) { #pragma unroll for (int j = 0; j < TILE_X; j++) { sum[j][i] += sA[ty + BLOCK_Y * i][kb] * sB[kb][tx + j * BLOCK_X]; } } } } //store result #pragma unroll for (int i = 0; i < TILE_Y; i++) { #pragma unroll for (int j = 0; j < TILE_X; j++) { C[y * WIDTH + x + BLOCK_Y * i * WIDTH + j * BLOCK_X] = sum[j][i]; } } } /* * Host code that invokes the matrix multiplication kernel * * The explicit implementation uses explicit memory copy * statements to move all data to the GPU, executes the * GPU kernel, and uses memory copies to copy the output * data back to host memory. This implementation achieves * no overlap between transfers and/or computation. * */ void matmul_explicit (float *C, float *A, float *B) { hipError_t err; err = hipMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_A: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_B: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_C: %s\n", hipGetErrorString (err)); } err = hipMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemset d_C: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_Y * TILE_Y))); float time; hipDeviceSynchronize (); start_timer (); err = hipMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device B: %s\n", hipGetErrorString (err)); } err = hipMemcpyAsync (d_A, A, WIDTH * HEIGHT * 
sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device A: %s\n", hipGetErrorString (err)); } hipLaunchKernelGGL(( matmul_kernel_opt) , dim3(grid), dim3(threads), 0, stream[1] , d_C, d_A, d_B); err = hipMemcpyAsync (C, d_C, WIDTH * HEIGHT * sizeof (float), hipMemcpyDeviceToHost, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy device to host C: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); stop_timer (&time); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After Explicit4"); printf ("EXPLICIT: %.6f ms\n", time); hipDeviceSynchronize (); start_timer (); hipLaunchKernelGGL(( matmul_kernel_opt) , dim3(grid), dim3(threads), 0, stream[1] , d_C, d_A, d_B); hipDeviceSynchronize (); stop_timer (&time); float flops = 2.0 * (WIDTH * HEIGHT) * (WIDTH); float giga = 1000000000.0; printf ("EXPLICIT kernel: %.6f ms\t %.3f GFLOP/s \n", time, (flops / giga) / (time / 1000.0)); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel explicit"); hipFree (d_C); hipFree (d_B); hipFree (d_A); } /* * Host code that invokes the matrix multiplication kernel * * The implicit implementation uses device-mapped host memory rather * than explicit memory copy statements. A different kernel is used * to ensure strictly coalesced access to system memory. 
* */ void matmul_implicit (float *C, float *A, float *B) { hipDeviceSynchronize (); CUDA_CHECK_ERROR ("before execution"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_Y * TILE_Y))); float time; hipDeviceSynchronize (); start_timer (); hipLaunchKernelGGL(( matmul_kernel_opt) , dim3(grid), dim3(threads), 0, stream[1] , C, A, B); hipDeviceSynchronize (); stop_timer (&time); printf ("IMPLICIT: %.6f ms\n", time); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); } /* * Host code that invokes the matrix multiplication kernel * * The streams implementation uses CUDA streams combined * with explicit memory copy statements. This way transfers * in one stream may overlap with computation and transfers * in other streams. * */ void matmul_streams (float *C, float *A, float *B) { hipError_t err; int k; err = hipMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_A: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_B: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_C: %s\n", hipGetErrorString (err)); } err = hipMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemset d_C: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); // dim3 grid( (int)ceilf((float)WIDTH / (float)(BLOCK_X)) , (int)ceilf((float)HEIGHT / (float)(BLOCK_Y))); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), 1); // dim3 grid( (int)ceilf((float)WIDTH / (float)(BLOCK_X)) , 1); int lps = WIDTH * BLOCK_Y * TILE_Y; float time; hipDeviceSynchronize (); 
start_timer (); err = hipMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device B: %s\n", hipGetErrorString (err)); } err = hipEventRecord (event_htod[1], stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipEventRecord htod: %s\n", hipGetErrorString (err)); } for (k = 0; k < NSTREAMS; k++) { err = hipMemcpyAsync (d_A + k * lps, A + k * lps, lps * sizeof (float), hipMemcpyHostToDevice, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device scratch: %s\n", hipGetErrorString (err)); } } for (k = 0; k < NSTREAMS; k++) { //wait for memcpy in stream 1 to be complete err = hipStreamWaitEvent (stream[k], event_htod[1], 0); if (err != hipSuccess) { fprintf (stderr, "Error in hipStreamWaitEvent htod 1: %s\n", hipGetErrorString (err)); } hipLaunchKernelGGL(( matmul_kernel_opt) , dim3(grid), dim3(threads), 0, stream[k] , d_C + k * lps, d_A + k * lps, d_B); } for (k = 0; k < NSTREAMS; k++) { err = hipMemcpyAsync (C + k * lps, d_C + k * lps, lps * sizeof (float), hipMemcpyDeviceToHost, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy device to host C: %s\n", hipGetErrorString (err)); } } hipDeviceSynchronize (); stop_timer (&time); printf ("STREAMS: %.6f ms\n", time); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); hipFree (d_C); hipFree (d_B); hipFree (d_A); } /* * Host code that invokes the matrix multiplication kernel * * The Hybrid implementation uses CUDA streams combined * with explicit memory copy statements for the input data * and uses device-mapped host memory to copy the output data * back to host memory. 
* */ void matmul_hybrid (float *C, float *A, float *B) { hipError_t err; int k; err = hipMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_A: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_B: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), 1); int lps = WIDTH * BLOCK_Y * TILE_Y; float time; hipDeviceSynchronize (); start_timer (); err = hipMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device B: %s\n", hipGetErrorString (err)); } err = hipEventRecord (event_htod[1], stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipEventRecord htod: %s\n", hipGetErrorString (err)); } for (k = 0; k < NSTREAMS; k++) { err = hipMemcpyAsync (d_A + k * lps, A + k * lps, lps * sizeof (float), hipMemcpyHostToDevice, stream[k]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device scratch: %s\n", hipGetErrorString (err)); } } for (k = 0; k < NSTREAMS; k++) { //wait for memcpy in stream 1 to be complete err = hipStreamWaitEvent (stream[k], event_htod[1], 0); if (err != hipSuccess) { fprintf (stderr, "Error in hipStreamWaitEvent htod 1: %s\n", hipGetErrorString (err)); } hipLaunchKernelGGL(( matmul_kernel_opt) , dim3(grid), dim3(threads), 0, stream[k] , C + k * lps, d_A + k * lps, d_B); } hipDeviceSynchronize (); stop_timer (&time); printf ("HYBRID: %.6f ms\n", time); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); hipFree (d_B); hipFree (d_A); } /* * Host code that invokes the naive matrix multiplication kernel * * The naive kernel is used to verify results from the other * 
implementations. It uses explicit memory copy * statements to move all data to the GPU, executes the * naive kernel, and uses memory copies to copy the output * data back to host memory. This implementation achieves * no overlap between transfers and/or computation. * */ void matmul_naive (float *C, float *A, float *B) { hipError_t err; err = hipMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_A: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_B: %s\n", hipGetErrorString (err)); } err = hipMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMalloc d_C: %s\n", hipGetErrorString (err)); } err = hipMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemset d_C: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); //NAIVE - DO NOT CHANGE - dim3 threads (BLOCK_SIZE, BLOCK_SIZE); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_SIZE)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_SIZE))); float time; hipDeviceSynchronize (); start_timer (); err = hipMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device B: %s\n", hipGetErrorString (err)); } err = hipMemcpyAsync (d_A, A, WIDTH * HEIGHT * sizeof (float), hipMemcpyHostToDevice, stream[1]); if (err != hipSuccess) { fprintf (stderr, "Error in hipMemcpy host to device A: %s\n", hipGetErrorString (err)); } //NAIVE - DO NOT CHANGE - hipLaunchKernelGGL(( matmul_kernel_shared) , dim3(grid), dim3(threads), 0, stream[1] , d_C, d_A, d_B); err = hipMemcpyAsync (C, d_C, WIDTH * HEIGHT * sizeof (float), hipMemcpyDeviceToHost, stream[1]); if (err != hipSuccess) { fprintf 
(stderr, "Error in hipMemcpy device to host C: %s\n", hipGetErrorString (err)); } hipDeviceSynchronize (); stop_timer (&time); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After Naive"); printf ("NAIVE: %.6f ms\n", time); hipDeviceSynchronize (); start_timer (); //NAIVE - DO NOT CHANGE - hipLaunchKernelGGL(( matmul_kernel_shared) , dim3(grid), dim3(threads), 0, stream[1] , d_C, d_A, d_B); hipDeviceSynchronize (); stop_timer (&time); float flops = 2.0 * (WIDTH * HEIGHT) * (WIDTH); float giga = 1000000000.0; printf ("NAIVE kernel: %.6f ms\t %.3f GFLOP/s \n", time, (flops / giga) / (time / 1000.0)); hipDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel explicit"); hipFree (d_C); hipFree (d_B); hipFree (d_A); } int compare (float *a1, float *a2, int N) { int i = 0, res = 0; int print = 0; int zero_one = 0; int zero_two = 0; float eps = 0.000001; for (i = 0; i < N; i++) { if (a1[i] < eps && a1[i] > -eps) { zero_one++; } if (a2[i] < eps && a2[i] > -eps) { zero_two++; } if (isnan (a1[i]) || isnan (a2[i])) { res++; if (print < 10) { print++; fprintf (stderr, "Error detected at i=%d,\t a1= %10.7e \t a2= \t %10.7e\n", i, a1[i], a2[i]); } } float diff = a1[i] - a2[i]; if (diff > eps || diff < -eps) { res++; if (print < 10) { print++; fprintf (stderr, "Error detected at i=%d,\t a1= \t %10.7e \t a2= \t %10.7e\n", i, a1[i], a2[i]); } } } if (zero_one > (N / 4)) { fprintf (stderr, "Error: array1 contains %d zeros\n", zero_one); } if (zero_two > (N / 4)) { fprintf (stderr, "Error: array2 contains %d zeros\n", zero_two); } if (zero_one != zero_two) { fprintf (stderr, "Error: number of zeros in arrays dont correspond zero1=%d, zero2=%d\n", zero_one, zero_two); } if (res > 0) { fprintf (stdout, "Number of errors in GPU result: %d\n", res); } return res; }
011cf2fb0d10cd3bcf6f90aff83eaf3c036a674d.cu
/* * Copyright 2014 Netherlands eScience Center * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This program benchmarks four different implementations for * overlapping CPU-GPU communication and GPU computation of a * matrix multiplication kernel. * * The kernel is assumed to be tuned to each device by selecting * the best performing combination of thread block dimensions * and tiling factors in X and Y. In this implementation tiling * in X increases the amount of work per thread block and tiling * in Y increases the amount of work per thread. 
* * @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> * */ #include <stdio.h> #include <stdlib.h> #define WIDTH 4096 #define HEIGHT 4096 //Select best kernel configuration for your device //Tesla K20 //#define BLOCK_X 32 //#define BLOCK_Y 8 //#define TILE_Y 4 //#define TILE_X 8 //GTX 480 #define BLOCK_X 32 #define BLOCK_Y 8 #define TILE_Y 4 #define TILE_X 8 //GTX Titan //#define BLOCK_X 64 //#define BLOCK_Y 8 //#define TILE_Y 8 //#define TILE_X 2 #define NSTREAMS (WIDTH/BLOCK_X) #define ITERATIONS 5 //for naive #define BLOCK_SIZE 32 #define CUDA_CHECK_ERROR(errorMessage) do { \ cudaError_t err = cudaGetLastError(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ err = cudaThreadSynchronize(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } } while (0) /* "" */ extern "C" { void matmul (float *res, float *mat, float *vec); void matmul_explicit (float *res, float *mat, float *vec); void matmul_implicit (float *C, float *A, float *B); void matmul_streams (float *C, float *A, float *B); void matmul_hybrid (float *C, float *A, float *B); void matmul_naive (float *C, float *A, float *B); void start_timer (); void stop_timer (float *); int compare (float *a, float *b, int N); __global__ void matmul_kernel_shared (float *C, float *A, float *B); __global__ void matmul_kernel_opt (float *C, float *A, float *B); } int nStreams = -1; cudaStream_t stream[NSTREAMS]; cudaEvent_t event_htod[NSTREAMS]; float *h_A; float *h_B; float *h_C; float *h_Cref; float *d_A; float *d_B; float *d_C; int main () { cudaError_t err; cudaSetDeviceFlags (cudaDeviceMapHost); cudaSetDevice (0); cudaDeviceSetCacheConfig (cudaFuncCachePreferShared); cudaDeviceSetSharedMemConfig (cudaSharedMemBankSizeFourByte); //setup 
streams for (int k = 0; k < NSTREAMS; k++) { err = cudaStreamCreate (&stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString (err)); } err = cudaEventCreate (&event_htod[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaEventCreate htod: %s\n", cudaGetErrorString (err)); } } //setup memory err = cudaHostAlloc ((void **) &h_A, WIDTH * HEIGHT * sizeof (float), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err)); } err = cudaHostAlloc ((void **) &h_B, WIDTH * HEIGHT * sizeof (float), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err)); } err = cudaHostAlloc ((void **) &h_C, WIDTH * HEIGHT * sizeof (float), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err)); } err = cudaHostAlloc ((void **) &h_Cref, WIDTH * HEIGHT * sizeof (float), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString (err)); } for (int y = 0; y < HEIGHT; y++) { for (int x = 0; x < WIDTH; x++) { int r = rand (); h_A[y * (WIDTH) + x] = 0.000001 + (r % 999) / 1000.0; r = rand (); h_B[y * (WIDTH) + x] = 0.000001 + (r % 500) / 5000.0; } } //error checking cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After setup"); //create reference answer for correctness checks memset (h_Cref, 0, WIDTH * HEIGHT * sizeof (float)); memset (h_C, 0, WIDTH * HEIGHT * sizeof (float)); matmul_naive (h_Cref, h_A, h_B); //run four different implementations for (int k = 0; k < ITERATIONS; k++) { matmul_explicit (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for (int k = 0; k < ITERATIONS; k++) { matmul_implicit (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for (int k = 0; k < ITERATIONS; k++) { matmul_streams (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); for 
(int k = 0; k < ITERATIONS; k++) { matmul_hybrid (h_C, h_A, h_B); } compare (h_Cref, h_C, WIDTH * HEIGHT); return 0; } //reference implementation, not called by this program void matmul (float *C, float *A, float *B) { int x, y, k; float sum = 0.0f; for (y = 0; y < HEIGHT; y++) { for (x = 0; x < WIDTH; x++) { sum = 0.0f; for (k = 0; k < WIDTH; k++) { sum += A[y * WIDTH + k] * B[k * WIDTH + x]; } C[y * WIDTH + x] = sum; } } } /* * Naive CUDA kernel for matrix multiplication * * not called in this program, included for completeness and clarity. */ __global__ void matmul_kernel (float *C, float *A, float *B) { int x = blockIdx.x * BLOCK_X + threadIdx.x; int y = blockIdx.y * BLOCK_Y + threadIdx.y; int k; float sum = 0.0f; if ((x < WIDTH) && (y < HEIGHT)) { for (k = 0; k < WIDTH; k++) { sum += A[y * WIDTH + k] * B[k * WIDTH + x]; } C[y * WIDTH + x] = sum; } } /* * Slightly less naive CUDA kernel for matrix multiplication * * This implementation is used to compare the results of the produced * by the optimized and different schemes for overlapping communication * and computation. The main reason to use another kernel to compare * results is that a naive CPU version would take forever. * * In this kernel a thread block uses a tile of shared memory to * cooperatively load and store the values required for each computation * step. 
* */ __global__ void matmul_kernel_shared (float *C, float *A, float *B) { __shared__ float sA[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float sB[BLOCK_SIZE][BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * BLOCK_SIZE + threadIdx.x; int y = blockIdx.y * BLOCK_SIZE + threadIdx.y; int k, kb; float sum = 0.0f; if ((x < WIDTH) && (y < HEIGHT)) { for (k = 0; k < WIDTH; k += BLOCK_SIZE) { __syncthreads (); sA[ty][tx] = A[y * WIDTH + k + tx]; sB[ty][tx] = B[(k + ty) * WIDTH + x]; __syncthreads (); for (kb = 0; kb < BLOCK_SIZE; kb++) { sum += sA[ty][kb] * sB[kb][tx]; } } C[y * WIDTH + x] = sum; } } /* * Optimized CUDA kernel for matrix multiplication * * This kernel is optimized and tuned according to the directions given * in: "Better performance at lower occupancy" by V. Volkov, * GPU Technology Conference, GTC 2010. * * The thread block dimensions as well as tiling factors are tuned to- * wards each GPU used as part of our evaluation. * */ __global__ void matmul_kernel_opt (float *C, float *A, float *B) { __shared__ float sA[BLOCK_X][BLOCK_X]; __shared__ float sB[BLOCK_X][BLOCK_X * TILE_X]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * BLOCK_X * TILE_X + threadIdx.x; int y = blockIdx.y * BLOCK_Y * TILE_Y + threadIdx.y; int k, kb; #if(TILE_X == 8) #if(TILE_Y == 4) float sum[TILE_X][TILE_Y] = { {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0} }; #endif #elif(TILE_X == 2) #if(TILE_Y == 8) float sum[TILE_X][TILE_Y] = { {0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0} }; #endif #endif for (k = 0; k < WIDTH; k += BLOCK_X) { __syncthreads (); #pragma unroll for (int i = 0; i < TILE_Y; i++) { sA[ty + BLOCK_Y * i][tx] = A[y * WIDTH + BLOCK_Y * i * WIDTH + k + tx]; #pragma unroll for (int j = 0; j < TILE_X; j++) { sB[ty + BLOCK_Y * i][tx + j * BLOCK_X] = B[(k + ty + BLOCK_Y * i) * WIDTH + x + j * BLOCK_X]; } } __syncthreads (); //compute #pragma unroll for (kb = 0; kb < 
BLOCK_X; kb++) { #pragma unroll for (int i = 0; i < TILE_Y; i++) { #pragma unroll for (int j = 0; j < TILE_X; j++) { sum[j][i] += sA[ty + BLOCK_Y * i][kb] * sB[kb][tx + j * BLOCK_X]; } } } } //store result #pragma unroll for (int i = 0; i < TILE_Y; i++) { #pragma unroll for (int j = 0; j < TILE_X; j++) { C[y * WIDTH + x + BLOCK_Y * i * WIDTH + j * BLOCK_X] = sum[j][i]; } } } /* * Host code that invokes the matrix multiplication kernel * * The explicit implementation uses explicit memory copy * statements to move all data to the GPU, executes the * GPU kernel, and uses memory copies to copy the output * data back to host memory. This implementation achieves * no overlap between transfers and/or computation. * */ void matmul_explicit (float *C, float *A, float *B) { cudaError_t err; err = cudaMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_A: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_B: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_C: %s\n", cudaGetErrorString (err)); } err = cudaMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemset d_C: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_Y * TILE_Y))); float time; cudaDeviceSynchronize (); start_timer (); err = cudaMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device B: %s\n", cudaGetErrorString (err)); } err = cudaMemcpyAsync 
(d_A, A, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device A: %s\n", cudaGetErrorString (err)); } matmul_kernel_opt <<< grid, threads, 0, stream[1] >>> (d_C, d_A, d_B); err = cudaMemcpyAsync (C, d_C, WIDTH * HEIGHT * sizeof (float), cudaMemcpyDeviceToHost, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy device to host C: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); stop_timer (&time); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After Explicit4"); printf ("EXPLICIT: %.6f ms\n", time); cudaDeviceSynchronize (); start_timer (); matmul_kernel_opt <<< grid, threads, 0, stream[1] >>> (d_C, d_A, d_B); cudaDeviceSynchronize (); stop_timer (&time); float flops = 2.0 * (WIDTH * HEIGHT) * (WIDTH); float giga = 1000000000.0; printf ("EXPLICIT kernel: %.6f ms\t %.3f GFLOP/s \n", time, (flops / giga) / (time / 1000.0)); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel explicit"); cudaFree (d_C); cudaFree (d_B); cudaFree (d_A); } /* * Host code that invokes the matrix multiplication kernel * * The implicit implementation uses device-mapped host memory rather * than explicit memory copy statements. A different kernel is used * to ensure strictly coalesced access to system memory. 
* */ void matmul_implicit (float *C, float *A, float *B) { cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("before execution"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_Y * TILE_Y))); float time; cudaDeviceSynchronize (); start_timer (); matmul_kernel_opt <<< grid, threads, 0, stream[1] >>> (C, A, B); cudaDeviceSynchronize (); stop_timer (&time); printf ("IMPLICIT: %.6f ms\n", time); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); } /* * Host code that invokes the matrix multiplication kernel * * The streams implementation uses CUDA streams combined * with explicit memory copy statements. This way transfers * in one stream may overlap with computation and transfers * in other streams. * */ void matmul_streams (float *C, float *A, float *B) { cudaError_t err; int k; err = cudaMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_A: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_B: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_C: %s\n", cudaGetErrorString (err)); } err = cudaMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemset d_C: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); // dim3 grid( (int)ceilf((float)WIDTH / (float)(BLOCK_X)) , (int)ceilf((float)HEIGHT / (float)(BLOCK_Y))); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), 1); // dim3 grid( (int)ceilf((float)WIDTH / (float)(BLOCK_X)) , 1); int lps = WIDTH * BLOCK_Y * TILE_Y; float time; cudaDeviceSynchronize (); 
start_timer (); err = cudaMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device B: %s\n", cudaGetErrorString (err)); } err = cudaEventRecord (event_htod[1], stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err)); } for (k = 0; k < NSTREAMS; k++) { err = cudaMemcpyAsync (d_A + k * lps, A + k * lps, lps * sizeof (float), cudaMemcpyHostToDevice, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device scratch: %s\n", cudaGetErrorString (err)); } } for (k = 0; k < NSTREAMS; k++) { //wait for memcpy in stream 1 to be complete err = cudaStreamWaitEvent (stream[k], event_htod[1], 0); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err)); } matmul_kernel_opt <<< grid, threads, 0, stream[k] >>> (d_C + k * lps, d_A + k * lps, d_B); } for (k = 0; k < NSTREAMS; k++) { err = cudaMemcpyAsync (C + k * lps, d_C + k * lps, lps * sizeof (float), cudaMemcpyDeviceToHost, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy device to host C: %s\n", cudaGetErrorString (err)); } } cudaDeviceSynchronize (); stop_timer (&time); printf ("STREAMS: %.6f ms\n", time); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); cudaFree (d_C); cudaFree (d_B); cudaFree (d_A); } /* * Host code that invokes the matrix multiplication kernel * * The Hybrid implementation uses CUDA streams combined * with explicit memory copy statements for the input data * and uses device-mapped host memory to copy the output data * back to host memory. 
* */ void matmul_hybrid (float *C, float *A, float *B) { cudaError_t err; int k; err = cudaMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_A: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_B: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); dim3 threads (BLOCK_X, BLOCK_Y); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_X * TILE_X)), 1); int lps = WIDTH * BLOCK_Y * TILE_Y; float time; cudaDeviceSynchronize (); start_timer (); err = cudaMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device B: %s\n", cudaGetErrorString (err)); } err = cudaEventRecord (event_htod[1], stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString (err)); } for (k = 0; k < NSTREAMS; k++) { err = cudaMemcpyAsync (d_A + k * lps, A + k * lps, lps * sizeof (float), cudaMemcpyHostToDevice, stream[k]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device scratch: %s\n", cudaGetErrorString (err)); } } for (k = 0; k < NSTREAMS; k++) { //wait for memcpy in stream 1 to be complete err = cudaStreamWaitEvent (stream[k], event_htod[1], 0); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaStreamWaitEvent htod 1: %s\n", cudaGetErrorString (err)); } matmul_kernel_opt <<< grid, threads, 0, stream[k] >>> (C + k * lps, d_A + k * lps, d_B); } cudaDeviceSynchronize (); stop_timer (&time); printf ("HYBRID: %.6f ms\n", time); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel"); cudaFree (d_B); cudaFree (d_A); } /* * Host code that invokes the naive matrix multiplication kernel * * The naive kernel is used to verify results from the other * 
implementations. It uses explicit memory copy * statements to move all data to the GPU, executes the * naive kernel, and uses memory copies to copy the output * data back to host memory. This implementation achieves * no overlap between transfers and/or computation. * */ void matmul_naive (float *C, float *A, float *B) { cudaError_t err; err = cudaMalloc ((void **) &d_A, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_A: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_B, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_B: %s\n", cudaGetErrorString (err)); } err = cudaMalloc ((void **) &d_C, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMalloc d_C: %s\n", cudaGetErrorString (err)); } err = cudaMemset (d_C, 0, WIDTH * HEIGHT * sizeof (float)); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemset d_C: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After memory setup"); //NAIVE - DO NOT CHANGE - dim3 threads (BLOCK_SIZE, BLOCK_SIZE); dim3 grid ((int) ceilf ((float) WIDTH / (float) (BLOCK_SIZE)), (int) ceilf ((float) HEIGHT / (float) (BLOCK_SIZE))); float time; cudaDeviceSynchronize (); start_timer (); err = cudaMemcpyAsync (d_B, B, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device B: %s\n", cudaGetErrorString (err)); } err = cudaMemcpyAsync (d_A, A, WIDTH * HEIGHT * sizeof (float), cudaMemcpyHostToDevice, stream[1]); if (err != cudaSuccess) { fprintf (stderr, "Error in cudaMemcpy host to device A: %s\n", cudaGetErrorString (err)); } //NAIVE - DO NOT CHANGE - matmul_kernel_shared <<< grid, threads, 0, stream[1] >>> (d_C, d_A, d_B); err = cudaMemcpyAsync (C, d_C, WIDTH * HEIGHT * sizeof (float), cudaMemcpyDeviceToHost, stream[1]); if (err != cudaSuccess) { fprintf 
(stderr, "Error in cudaMemcpy device to host C: %s\n", cudaGetErrorString (err)); } cudaDeviceSynchronize (); stop_timer (&time); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After Naive"); printf ("NAIVE: %.6f ms\n", time); cudaDeviceSynchronize (); start_timer (); //NAIVE - DO NOT CHANGE - matmul_kernel_shared <<< grid, threads, 0, stream[1] >>> (d_C, d_A, d_B); cudaDeviceSynchronize (); stop_timer (&time); float flops = 2.0 * (WIDTH * HEIGHT) * (WIDTH); float giga = 1000000000.0; printf ("NAIVE kernel: %.6f ms\t %.3f GFLOP/s \n", time, (flops / giga) / (time / 1000.0)); cudaDeviceSynchronize (); CUDA_CHECK_ERROR ("After kernel explicit"); cudaFree (d_C); cudaFree (d_B); cudaFree (d_A); } int compare (float *a1, float *a2, int N) { int i = 0, res = 0; int print = 0; int zero_one = 0; int zero_two = 0; float eps = 0.000001; for (i = 0; i < N; i++) { if (a1[i] < eps && a1[i] > -eps) { zero_one++; } if (a2[i] < eps && a2[i] > -eps) { zero_two++; } if (isnan (a1[i]) || isnan (a2[i])) { res++; if (print < 10) { print++; fprintf (stderr, "Error detected at i=%d,\t a1= %10.7e \t a2= \t %10.7e\n", i, a1[i], a2[i]); } } float diff = a1[i] - a2[i]; if (diff > eps || diff < -eps) { res++; if (print < 10) { print++; fprintf (stderr, "Error detected at i=%d,\t a1= \t %10.7e \t a2= \t %10.7e\n", i, a1[i], a2[i]); } } } if (zero_one > (N / 4)) { fprintf (stderr, "Error: array1 contains %d zeros\n", zero_one); } if (zero_two > (N / 4)) { fprintf (stderr, "Error: array2 contains %d zeros\n", zero_two); } if (zero_one != zero_two) { fprintf (stderr, "Error: number of zeros in arrays dont correspond zero1=%d, zero2=%d\n", zero_one, zero_two); } if (res > 0) { fprintf (stdout, "Number of errors in GPU result: %d\n", res); } return res; }
44f43cac421038a5e4b423473d26a6be2aea0a37.hip
// !!! This is a file automatically generated by hipify!!! //raytracer.mustafaisik.net// #include "world.cuh" #include "hip/hip_runtime.h" int main() { { World world; world.loadScene("input//cornellbox//scene-realtime.xml"); world.video(); } return 0; }
44f43cac421038a5e4b423473d26a6be2aea0a37.cu
//raytracer.mustafaisik.net// #include "world.cuh" #include "cuda_runtime.h" int main() { { World world; world.loadScene("input//cornellbox//scene-realtime.xml"); world.video(); } return 0; }
e81f185497207ac01c9efc7796266001e0c3b981.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "Sphere.h" #include "RaytracingMath.h" #include "length_cm.h" #include "IndiceTools_GPU.h" using namespace gpu; static __device__ void copyGMtoSM(Sphere* ptrDevTabGM, Sphere* ptrDevTabSM, uint nbSphere); static __device__ void raytracing(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere); __host__ void uploadGPU(Sphere* ptrTabSphere); __constant__ Sphere TAB_CM[LENGTH_CM]; __constant__ Sphere TAB_SM[LENGTH_CM]; /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ __host__ void uploadGPU(Sphere* ptrTabSphere) { size_t size = LENGTH_CM * sizeof(Sphere); int offset = 0; HANDLE_ERROR(hipMemcpyToSymbol(TAB_CM, ptrTabSphere, size, offset, hipMemcpyHostToDevice)); } __device__ void copyGMtoSM(Sphere* ptrDevTabGM, Sphere* ptrDevTabSM, uint nbSphere) { const int TID_LOCAL = Indice2D::tidLocal(); const int NB_THREAD_LOCAL = Indice2D::nbThreadLocal(); int s = TID_LOCAL; while (s < nbSphere) { ptrDevTabSM[s] = ptrDevTabGM[s]; s += NB_THREAD_LOCAL; } } /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void raytracingGM(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { raytracing(ptrDevPixels, w, h, t, ptrDevTabSphere, nbSphere); } __global__ void raytracingCM(uchar4* ptrDevPixels, uint w, uint h, float t) { raytracing(ptrDevPixels, w, h, t, TAB_CM, LENGTH_CM); } __global__ void raytracingSM(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { extern __shared__ Sphere ptrDevTabSM[]; copyGMtoSM(ptrDevTabSphere, ptrDevTabSM, nbSphere); __syncthreads(); raytracing(ptrDevPixels, w, h, t, ptrDevTabSphere, nbSphere); } 
/*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void raytracing(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { RaytracingMath raytracingMath = RaytracingMath(ptrDevTabSphere, nbSphere); const int WH = w * h; const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); int i, j; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); raytracingMath.colorIJ(&ptrDevPixels[s], (float) i, (float) j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
e81f185497207ac01c9efc7796266001e0c3b981.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "Sphere.h" #include "RaytracingMath.h" #include "length_cm.h" #include "IndiceTools_GPU.h" using namespace gpu; static __device__ void copyGMtoSM(Sphere* ptrDevTabGM, Sphere* ptrDevTabSM, uint nbSphere); static __device__ void raytracing(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere); __host__ void uploadGPU(Sphere* ptrTabSphere); __constant__ Sphere TAB_CM[LENGTH_CM]; __constant__ Sphere TAB_SM[LENGTH_CM]; /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ __host__ void uploadGPU(Sphere* ptrTabSphere) { size_t size = LENGTH_CM * sizeof(Sphere); int offset = 0; HANDLE_ERROR(cudaMemcpyToSymbol(TAB_CM, ptrTabSphere, size, offset, cudaMemcpyHostToDevice)); } __device__ void copyGMtoSM(Sphere* ptrDevTabGM, Sphere* ptrDevTabSM, uint nbSphere) { const int TID_LOCAL = Indice2D::tidLocal(); const int NB_THREAD_LOCAL = Indice2D::nbThreadLocal(); int s = TID_LOCAL; while (s < nbSphere) { ptrDevTabSM[s] = ptrDevTabGM[s]; s += NB_THREAD_LOCAL; } } /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void raytracingGM(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { raytracing(ptrDevPixels, w, h, t, ptrDevTabSphere, nbSphere); } __global__ void raytracingCM(uchar4* ptrDevPixels, uint w, uint h, float t) { raytracing(ptrDevPixels, w, h, t, TAB_CM, LENGTH_CM); } __global__ void raytracingSM(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { extern __shared__ Sphere ptrDevTabSM[]; copyGMtoSM(ptrDevTabSphere, ptrDevTabSM, nbSphere); __syncthreads(); raytracing(ptrDevPixels, w, h, t, ptrDevTabSphere, nbSphere); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ 
void raytracing(uchar4* ptrDevPixels, uint w, uint h, float t, Sphere* ptrDevTabSphere, uint nbSphere) { RaytracingMath raytracingMath = RaytracingMath(ptrDevTabSphere, nbSphere); const int WH = w * h; const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); int i, j; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &i, &j); raytracingMath.colorIJ(&ptrDevPixels[s], (float) i, (float) j, t); s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
0d1517869cb6d5c417b178b8359f7ac481ec2ac0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrix_multiply_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *temp = NULL; hipMalloc(&temp, XSIZE*YSIZE); unsigned char *matrix = NULL; hipMalloc(&matrix, XSIZE*YSIZE); float *kernal = NULL; hipMalloc(&kernal, XSIZE*YSIZE); int order = 1; int middle = 1; int windowSizeX = XSIZE*YSIZE; int windowSizeY = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrix_multiply_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrix_multiply_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( 
matrix_multiply_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0d1517869cb6d5c417b178b8359f7ac481ec2ac0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrix_multiply_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *temp = NULL; cudaMalloc(&temp, XSIZE*YSIZE); unsigned char *matrix = NULL; cudaMalloc(&matrix, XSIZE*YSIZE); float *kernal = NULL; cudaMalloc(&kernal, XSIZE*YSIZE); int order = 1; int middle = 1; int windowSizeX = XSIZE*YSIZE; int windowSizeY = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrix_multiply_kernel<<<gridBlock,threadBlock>>>(temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrix_multiply_kernel<<<gridBlock,threadBlock>>>(temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrix_multiply_kernel<<<gridBlock,threadBlock>>>(temp,matrix,kernal,order,middle,windowSizeX,windowSizeY); } auto end = steady_clock::now(); auto usecs = 
duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d2bb899cc3adea9593b0b56232f5efc48ade70f4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/types.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define TAG_BYTES 10 #define GRID_X 32 #define GRID_Y 32 #define BLOCK_X 32 #define BLOCK_Y 32 #define ALPHABET_LEN 256 #define NOT_FOUND patlen #define max(a, b) ((a < b) ? b : a) const char *DAILY = "./dailyPack/dailyGPUsig.bin"; const char *MAIN = "./mainPack/mainGPUsig.bin"; //referenced //from http://en.wikipedia.org/wiki/BoyerMoore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ void make_delta1(int *delta1, uint8_t *pat, int32_t patlen) { int i; for (i=0; i < ALPHABET_LEN; i++) { delta1[i] = NOT_FOUND; } for (i=0; i < patlen-1; i++) { delta1[pat[i]] = patlen-1 - i; } } //referenced //from http://en.wikipedia.org/wiki/BoyerMoore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ int is_prefix(uint8_t *word, int wordlen, int pos) { int i; int suffixlen = wordlen - pos; // could also use the strncmp() library function here for (i = 0; i < suffixlen; i++) { if (word[i] != word[pos+i]) { return 0; } } return 1; } //referenced //from http://en.wikipedia.org/wiki/BoyerMoore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, // length of the longest suffix of word ending on word[pos]. 
// suffix_length("dddbcabc", 8, 4) = 2 __device__ int suffix_length(uint8_t *word, int wordlen, int pos) { int i; // increment suffix length i to the first mismatch or beginning // of the word for (i = 0; (word[pos-i] == word[wordlen-1-i]) && (i < pos); i++); return i; } //referenced //from http://en.wikipedia.org/wiki/BoyerMoore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ void make_delta2(int *delta2, uint8_t *pat, int32_t patlen) { int p; int last_prefix_index = patlen-1; // first loop for (p=patlen-1; p>=0; p--) { if (is_prefix(pat, patlen, p+1)) { last_prefix_index = p+1; } delta2[p] = last_prefix_index + (patlen-1 - p); } // second loop for (p=0; p < patlen-1; p++) { int slen = suffix_length(pat, patlen, p); if (pat[p - slen] != pat[patlen-1 - slen]) { delta2[patlen-1 - slen] = patlen-1 - p + slen; } } } //referenced //from http://en.wikipedia.org/wiki/BoyerMoore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ uint8_t* boyer_moore (uint8_t *string, uint32_t stringlen, uint8_t *pat, uint32_t patlen) { int i; int delta1[ALPHABET_LEN]; int *delta2 = (int *)malloc(patlen * sizeof(int)); make_delta1(delta1, pat, patlen); make_delta2(delta2, pat, patlen); // The empty pattern must be considered specially if (patlen == 0) return string; i = patlen-1; while (i < stringlen) { int j = patlen-1; while (j >= 0 && (string[i] == pat[j])) { --i; --j; } if (j < 0) { free(delta2); return (string + i+1); } i += max(delta1[string[i]], delta2[j]); } free(delta2); return NULL; } __global__ void patternMatching(uint8_t *set1, uint8_t *set2, uint8_t *fileBuf, int set1SigNum, int set2SigNum, int fileSize){ //note: blockDim.x = blockDim.y int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int idx = row*GRID_Y*BLOCK_Y + col; //GRID AND BLOCK are hardcoded for convenience uint8_t *found; //make sure that the idx is within 
the range of total number of signatures if(idx < set1SigNum){ found = boyer_moore(fileBuf,fileSize,set1+idx*TAG_BYTES,TAG_BYTES); if(found != NULL){ printf("found virus, lookup dailyGPUvirus.ndb line %d for virus type\n",idx); } } //make sure that the idx is within the range of total number of signatures if(idx >= set1SigNum && idx < set2SigNum){ found = boyer_moore(fileBuf, fileSize, set2+(idx-set1SigNum)*TAG_BYTES, TAG_BYTES); if(found != NULL){ printf("found virus, lookup mainGPUvirus.ndb line %d for virus type\n",(idx-set1SigNum)); } } } //function to load input file and signature files to scan void loadFile (const char *fileName, uint8_t **buffer, size_t *size){ long lSize; FILE *fp; fp = fopen (fileName , "rb" ); if( !fp ) perror(fileName),exit(1); //seek the beginning of file //fseek(fp, SEEK_SET, 0); fseek( fp , 0L , SEEK_END); lSize = ftell( fp ); rewind( fp ); //printf("%ld\n",lSize); (*size) = lSize; /* allocate memory for entire content */ (*buffer) = (uint8_t *) calloc( 1, lSize+1 ); if( !(*buffer) ) fclose(fp),fputs("memory alloc fails",stderr),exit(1); /* copy the file into the buffer */ if( 1!=fread( (*buffer) , lSize, 1 , fp) ) fclose(fp),free((*buffer)),fputs("entire read fails",stderr),exit(1); fclose(fp); } /* * Exit codes: * 0: clean * 1: infected * 2: error */ //const char *DBDIR = "/home/leon/clamav/share/clamav"; int main(int argc, char **argv) { int gpucount = 0; // Count of available GPUs //We only have 3701312 signatures //each thread get 1 signature, we need no more than 1024*1024 threads //grid size is then fixed to (32,32,1), and block size is (32,32,1) int Grid_Dim_x = GRID_X; //Grid dimension, x int Grid_Dim_y = GRID_Y; //Grid dimension, y int Block_Dim_x = BLOCK_X; //Block dimension, x int Block_Dim_y = BLOCK_Y; //Block dimension, y hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; //host buffer to store each signature dataset 
uint8_t *dailyBuf; uint8_t *mainBuf; uint8_t *fileBuf; uint8_t *devDb, *devMb, *devFb;//device buffer correspoding to the host buffer size_t sizeDb, sizeMb, sizeFb; if(argc != 2) { printf("Usage: %s file\n", argv[0]); return 2; } // --------------------SET PARAMETERS AND DATA ----------------------- //load signatures into host buffer loadFile(DAILY, &dailyBuf, &sizeDb); loadFile(MAIN, &mainBuf, &sizeMb); printf("loading signatures in %s\n",DAILY); printf("loading signatures in %s\n",MAIN); /* for(int i=0; i<11; i++){ printf("%x ", (unsigned uint8_t) dailyBuf[i]); } */ errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //alloc mem to GPU hipMalloc((void**)&devDb, sizeDb*sizeof(uint8_t)); hipMalloc((void**)&devMb, sizeMb*sizeof(uint8_t)); //copy sigs to GPU mem buffer hipMemcpy(devDb, dailyBuf, sizeDb ,hipMemcpyHostToDevice); hipMemcpy(devMb, mainBuf, sizeMb ,hipMemcpyHostToDevice); printf("Loaded %ld signatures.\n", (sizeDb+sizeMb)/TAG_BYTES); if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } //loading files into file buffer loadFile(argv[1], &fileBuf, &sizeFb); //alloc mem for files on GPU hipMalloc((void**)&devFb, sizeFb*sizeof(uint8_t)); //cp mem from host to GPU hipMemcpy(devFb, fileBuf, sizeFb ,hipMemcpyHostToDevice); //declare GPU params dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( patternMatching), dim3(Grid), dim3(Block), 0, 0, devDb, devMb, devFb, sizeDb/TAG_BYTES, sizeMb/TAG_BYTES, sizeFb); // make the host block until the device is finished with foo hipDeviceSynchronize(); // check for error errorcode = hipGetLastError(); if(errorcode != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", 
hipGetErrorString(errorcode)); exit(-1); } hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time free(mainBuf); free(dailyBuf); free(fileBuf); hipFree(devMb); hipFree(devDb); hipFree(devFb); return 0; }
d2bb899cc3adea9593b0b56232f5efc48ade70f4.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/types.h> #include <stdint.h> #include <cuda.h> #include <cuda_runtime.h> #define TAG_BYTES 10 #define GRID_X 32 #define GRID_Y 32 #define BLOCK_X 32 #define BLOCK_Y 32 #define ALPHABET_LEN 256 #define NOT_FOUND patlen #define max(a, b) ((a < b) ? b : a) const char *DAILY = "./dailyPack/dailyGPUsig.bin"; const char *MAIN = "./mainPack/mainGPUsig.bin"; //referenced //from http://en.wikipedia.org/wiki/Boyer–Moore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ void make_delta1(int *delta1, uint8_t *pat, int32_t patlen) { int i; for (i=0; i < ALPHABET_LEN; i++) { delta1[i] = NOT_FOUND; } for (i=0; i < patlen-1; i++) { delta1[pat[i]] = patlen-1 - i; } } //referenced //from http://en.wikipedia.org/wiki/Boyer–Moore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ int is_prefix(uint8_t *word, int wordlen, int pos) { int i; int suffixlen = wordlen - pos; // could also use the strncmp() library function here for (i = 0; i < suffixlen; i++) { if (word[i] != word[pos+i]) { return 0; } } return 1; } //referenced //from http://en.wikipedia.org/wiki/Boyer–Moore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, // length of the longest suffix of word ending on word[pos]. 
// suffix_length("dddbcabc", 8, 4) = 2 __device__ int suffix_length(uint8_t *word, int wordlen, int pos) { int i; // increment suffix length i to the first mismatch or beginning // of the word for (i = 0; (word[pos-i] == word[wordlen-1-i]) && (i < pos); i++); return i; } //referenced //from http://en.wikipedia.org/wiki/Boyer–Moore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ void make_delta2(int *delta2, uint8_t *pat, int32_t patlen) { int p; int last_prefix_index = patlen-1; // first loop for (p=patlen-1; p>=0; p--) { if (is_prefix(pat, patlen, p+1)) { last_prefix_index = p+1; } delta2[p] = last_prefix_index + (patlen-1 - p); } // second loop for (p=0; p < patlen-1; p++) { int slen = suffix_length(pat, patlen, p); if (pat[p - slen] != pat[patlen-1 - slen]) { delta2[patlen-1 - slen] = patlen-1 - p + slen; } } } //referenced //from http://en.wikipedia.org/wiki/Boyer–Moore_string_search_algorithm //as the Boyer-Moore pattern matching is a well-established algorithm, __device__ uint8_t* boyer_moore (uint8_t *string, uint32_t stringlen, uint8_t *pat, uint32_t patlen) { int i; int delta1[ALPHABET_LEN]; int *delta2 = (int *)malloc(patlen * sizeof(int)); make_delta1(delta1, pat, patlen); make_delta2(delta2, pat, patlen); // The empty pattern must be considered specially if (patlen == 0) return string; i = patlen-1; while (i < stringlen) { int j = patlen-1; while (j >= 0 && (string[i] == pat[j])) { --i; --j; } if (j < 0) { free(delta2); return (string + i+1); } i += max(delta1[string[i]], delta2[j]); } free(delta2); return NULL; } __global__ void patternMatching(uint8_t *set1, uint8_t *set2, uint8_t *fileBuf, int set1SigNum, int set2SigNum, int fileSize){ //note: blockDim.x = blockDim.y int col = threadIdx.x + blockDim.x * blockIdx.x; int row = threadIdx.y + blockDim.y * blockIdx.y; int idx = row*GRID_Y*BLOCK_Y + col; //GRID AND BLOCK are hardcoded for convenience uint8_t *found; //make sure that the idx is 
within the range of total number of signatures if(idx < set1SigNum){ found = boyer_moore(fileBuf,fileSize,set1+idx*TAG_BYTES,TAG_BYTES); if(found != NULL){ printf("found virus, lookup dailyGPUvirus.ndb line %d for virus type\n",idx); } } //make sure that the idx is within the range of total number of signatures if(idx >= set1SigNum && idx < set2SigNum){ found = boyer_moore(fileBuf, fileSize, set2+(idx-set1SigNum)*TAG_BYTES, TAG_BYTES); if(found != NULL){ printf("found virus, lookup mainGPUvirus.ndb line %d for virus type\n",(idx-set1SigNum)); } } } //function to load input file and signature files to scan void loadFile (const char *fileName, uint8_t **buffer, size_t *size){ long lSize; FILE *fp; fp = fopen (fileName , "rb" ); if( !fp ) perror(fileName),exit(1); //seek the beginning of file //fseek(fp, SEEK_SET, 0); fseek( fp , 0L , SEEK_END); lSize = ftell( fp ); rewind( fp ); //printf("%ld\n",lSize); (*size) = lSize; /* allocate memory for entire content */ (*buffer) = (uint8_t *) calloc( 1, lSize+1 ); if( !(*buffer) ) fclose(fp),fputs("memory alloc fails",stderr),exit(1); /* copy the file into the buffer */ if( 1!=fread( (*buffer) , lSize, 1 , fp) ) fclose(fp),free((*buffer)),fputs("entire read fails",stderr),exit(1); fclose(fp); } /* * Exit codes: * 0: clean * 1: infected * 2: error */ //const char *DBDIR = "/home/leon/clamav/share/clamav"; int main(int argc, char **argv) { int gpucount = 0; // Count of available GPUs //We only have 3701312 signatures //each thread get 1 signature, we need no more than 1024*1024 threads //grid size is then fixed to (32,32,1), and block size is (32,32,1) int Grid_Dim_x = GRID_X; //Grid dimension, x int Grid_Dim_y = GRID_Y; //Grid dimension, y int Block_Dim_x = BLOCK_X; //Block dimension, x int Block_Dim_y = BLOCK_Y; //Block dimension, y cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; //host buffer to store each signature 
dataset uint8_t *dailyBuf; uint8_t *mainBuf; uint8_t *fileBuf; uint8_t *devDb, *devMb, *devFb;//device buffer correspoding to the host buffer size_t sizeDb, sizeMb, sizeFb; if(argc != 2) { printf("Usage: %s file\n", argv[0]); return 2; } // --------------------SET PARAMETERS AND DATA ----------------------- //load signatures into host buffer loadFile(DAILY, &dailyBuf, &sizeDb); loadFile(MAIN, &mainBuf, &sizeMb); printf("loading signatures in %s\n",DAILY); printf("loading signatures in %s\n",MAIN); /* for(int i=0; i<11; i++){ printf("%x ", (unsigned uint8_t) dailyBuf[i]); } */ errorcode = cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } //alloc mem to GPU cudaMalloc((void**)&devDb, sizeDb*sizeof(uint8_t)); cudaMalloc((void**)&devMb, sizeMb*sizeof(uint8_t)); //copy sigs to GPU mem buffer cudaMemcpy(devDb, dailyBuf, sizeDb ,cudaMemcpyHostToDevice); cudaMemcpy(devMb, mainBuf, sizeMb ,cudaMemcpyHostToDevice); printf("Loaded %ld signatures.\n", (sizeDb+sizeMb)/TAG_BYTES); if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } //loading files into file buffer loadFile(argv[1], &fileBuf, &sizeFb); //alloc mem for files on GPU cudaMalloc((void**)&devFb, sizeFb*sizeof(uint8_t)); //cp mem from host to GPU cudaMemcpy(devFb, fileBuf, sizeFb ,cudaMemcpyHostToDevice); //declare GPU params dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); patternMatching<<<Grid, Block>>>(devDb, devMb, devFb, sizeDb/TAG_BYTES, sizeMb/TAG_BYTES, sizeFb); // make the host block until the device is finished with foo cudaThreadSynchronize(); // check for error errorcode = cudaGetLastError(); if(errorcode != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(errorcode)); 
exit(-1); } cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time free(mainBuf); free(dailyBuf); free(fileBuf); cudaFree(devMb); cudaFree(devDb); cudaFree(devFb); return 0; }
738357ea5a361cec053fab930cc64e9cb15ad62f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add(int *a, int *b, int *c,int size) { c[size*blockIdx.x+ threadIdx.x] = a[size*blockIdx.x+ threadIdx.x] + b[size*blockIdx.x+ threadIdx.x]; }
738357ea5a361cec053fab930cc64e9cb15ad62f.cu
#include "includes.h" __global__ void add(int *a, int *b, int *c,int size) { c[size*blockIdx.x+ threadIdx.x] = a[size*blockIdx.x+ threadIdx.x] + b[size*blockIdx.x+ threadIdx.x]; }
b3678de62034d504253dc4146f64a686c359d06f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> //#include <hiprand/hiprand.h> #include <stdio.h> #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ > 200 // // This version creates k samples per input feature, per iteration (with a multinomial random generator). // A and B are the factor matrices. Cir, Cic the row, column indices of the sparse matrix S, P its values, and nnz its size. // S holds inner products A[:,i] with B[:,j]. Ms holds model samples, Us holds user samples. // __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, hiprandState_t *rstates) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int id = threadIdx.x + k*blockIdx.x; hiprandState_t rstate; if (threadIdx.x < k) { rstate = rstates[id]; } for (int j = jstart; j < jend ; j++) { int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; float cr; if (threadIdx.x < k) { cr = P[j] * hiprand_uniform(&rstate); } int tid = threadIdx.x; float sum = 0; while (tid < nrows) { float tot = A[tid + aoff] * B[tid + boff]; float tmp = __shfl_up(tot, 1); if (threadIdx.x >= 1) tot += tmp; tmp = __shfl_up(tot, 2); if (threadIdx.x >= 2) tot += tmp; tmp = __shfl_up(tot, 4); if (threadIdx.x >= 4) tot += tmp; tmp = __shfl_up(tot, 8); if (threadIdx.x >= 8) tot += tmp; tmp = __shfl_up(tot, 0x10); if (threadIdx.x >= 0x10) tot += tmp; float bsum = sum; sum += tot; tmp = __shfl_up(sum, 1); if (threadIdx.x > 0) { bsum = tmp; } for (int i = 0; i < k; i++) { float crx = __shfl(cr, i); if (crx > bsum && crx <= sum) { Ms[i + j*k] = tid + aoff; Us[i + j*k] = tid + boff; } } sum = __shfl(sum, 0x1f); tid += blockDim.x; } } } __global__ void __LDA_Gibbsy(int nrows, int ncols, float *A, float *B, float *AN, int *Cir, int *Cjc, float *P, float nsamps, hiprandState_t *rstates) { __shared__ float merge[32]; int jstart = ((long 
long)blockIdx.x) * ncols / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); hiprandState_t rstate = rstates[id]; float prod, sum, bsum, user; int aoff, boff; for (int j0 = jstart; j0 < jend ; j0++) { boff = nrows * j0; user = B[tid + boff]; for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) { aoff = nrows * Cir[j]; prod = A[tid + aoff] * user; sum = prod + __shfl_down(prod, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); __syncthreads(); if (threadIdx.x == threadIdx.y) { merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.y == 0) { sum = merge[threadIdx.x]; sum = sum + __shfl_down(sum, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.x == threadIdx.y) { sum = merge[threadIdx.x]; } bsum = __shfl(sum, threadIdx.y); float pval = nsamps / bsum; int cr = hiprand_poisson(&rstate, prod * pval); if (cr > 0) { atomicAdd(&AN[tid + aoff], cr); user += cr; } } B[tid + boff] = user; } } // // This version uses Poisson RNG to generate several random numbers per point, per iteration. // nrows is number of rows in models A and B. A is nrows * nfeats, B is nrows * nusers // AN and BN are updaters for A and B and hold sample counts from this iteration. // Cir anc Cic are row and column indices for the sparse matrix. // P holds the inner products (results of a call to dds) of A and B columns corresponding to Cir[j] and Cic[j] // nsamps is the expected number of samples to compute for this iteration - its just a multiplier // for individual poisson lambdas. 
// __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, hiprandState_t *rstates) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); hiprandState_t rstate = rstates[id]; for (int j = jstart; j < jend ; j++) { int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; float pval = nsamps / P[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { float prod = A[i + aoff] * B[i + boff]; int cr = hiprand_poisson(&rstate, prod * pval); if (cr > 0) { atomicAdd(&AN[i + aoff], cr); atomicAdd(&BN[i + boff], cr); } } } } __global__ void __randinit(hiprandState_t *rstates) { int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); hiprand_init(1234, id, 0, &rstates[id]); } #else __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, hiprandState_t *) {} __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, hiprandState_t *) {} __global__ void __LDA_Gibbsy(int nrows, int nnz, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps, hiprandState_t *) {} __global__ void __randinit(hiprandState_t *rstates) {} #endif #else __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, hiprandState_t *) {} __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, hiprandState_t *) {} __global__ void __LDA_Gibbsy(int nrows, int nnz, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps, hiprandState_t *) {} __global__ void __randinit(hiprandState_t *rstates) {} #endif 
#define DDS_BLKY 32 int LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k) { int nblocks = min(1024, max(1,nnz/128)); hiprandState_t *rstates; int err; err = hipMalloc(( void **)& rstates , k * nblocks * sizeof(hiprandState_t)); if (err > 0) { fprintf(stderr, "Error in hipMalloc %d", err); return err; } hipDeviceSynchronize(); hipLaunchKernelGGL(( __randinit), dim3(nblocks),dim3(k), 0, 0, rstates); hipDeviceSynchronize(); hipLaunchKernelGGL(( __LDA_Gibbs1), dim3(nblocks),dim3(32), 0, 0, nrows, nnz, A, B, Cir, Cic, P, Ms, Us, k, rstates); hipDeviceSynchronize(); hipFree(rstates); err = hipGetLastError(); return err; } int LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps) { dim3 blockDims(min(32,nrows), min(32, 1+(nrows-1)/64), 1); int nblocks = min(128, max(1,nnz/128)); hiprandState_t *rstates; int err; err = hipMalloc(( void **)& rstates , nblocks * blockDims.x * blockDims.y * sizeof(hiprandState_t)); if (err > 0) { fprintf(stderr, "Error in hipMalloc %d", err); return err; } hipDeviceSynchronize(); hipLaunchKernelGGL(( __randinit), dim3(nblocks),dim3(blockDims), 0, 0, rstates); hipDeviceSynchronize(); hipLaunchKernelGGL(( __LDA_Gibbs), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, AN, BN, Cir, Cic, P, nsamps, rstates); hipDeviceSynchronize(); hipFree(rstates); err = hipGetLastError(); return err; } int LDA_Gibbsy(int nrows, int ncols, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps) { dim3 blockDims(32, 32); int nblocks = min(128, max(1,ncols/2)); hiprandState_t *rstates; int err; err = hipMalloc(( void **)& rstates , nblocks * blockDims.x * blockDims.y * sizeof(hiprandState_t)); if (err > 0) { fprintf(stderr, "Error in hipMalloc %d", err); return err; } hipDeviceSynchronize(); hipLaunchKernelGGL(( __randinit), dim3(nblocks),dim3(blockDims), 0, 0, rstates); hipDeviceSynchronize(); hipLaunchKernelGGL(( 
__LDA_Gibbsy), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, AN, Cir, Cic, P, nsamps, rstates); hipDeviceSynchronize(); hipFree(rstates); err = hipGetLastError(); return err; }
b3678de62034d504253dc4146f64a686c359d06f.cu
#include <cuda_runtime.h> #include <curand_kernel.h> //#include <curand.h> #include <stdio.h> #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ > 200 // // This version creates k samples per input feature, per iteration (with a multinomial random generator). // A and B are the factor matrices. Cir, Cic the row, column indices of the sparse matrix S, P its values, and nnz its size. // S holds inner products A[:,i] with B[:,j]. Ms holds model samples, Us holds user samples. // __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, curandState *rstates) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int id = threadIdx.x + k*blockIdx.x; curandState rstate; if (threadIdx.x < k) { rstate = rstates[id]; } for (int j = jstart; j < jend ; j++) { int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; float cr; if (threadIdx.x < k) { cr = P[j] * curand_uniform(&rstate); } int tid = threadIdx.x; float sum = 0; while (tid < nrows) { float tot = A[tid + aoff] * B[tid + boff]; float tmp = __shfl_up(tot, 1); if (threadIdx.x >= 1) tot += tmp; tmp = __shfl_up(tot, 2); if (threadIdx.x >= 2) tot += tmp; tmp = __shfl_up(tot, 4); if (threadIdx.x >= 4) tot += tmp; tmp = __shfl_up(tot, 8); if (threadIdx.x >= 8) tot += tmp; tmp = __shfl_up(tot, 0x10); if (threadIdx.x >= 0x10) tot += tmp; float bsum = sum; sum += tot; tmp = __shfl_up(sum, 1); if (threadIdx.x > 0) { bsum = tmp; } for (int i = 0; i < k; i++) { float crx = __shfl(cr, i); if (crx > bsum && crx <= sum) { Ms[i + j*k] = tid + aoff; Us[i + j*k] = tid + boff; } } sum = __shfl(sum, 0x1f); tid += blockDim.x; } } } __global__ void __LDA_Gibbsy(int nrows, int ncols, float *A, float *B, float *AN, int *Cir, int *Cjc, float *P, float nsamps, curandState *rstates) { __shared__ float merge[32]; int jstart = ((long long)blockIdx.x) * ncols / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; 
int tid = threadIdx.x + blockDim.x * threadIdx.y; int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); curandState rstate = rstates[id]; float prod, sum, bsum, user; int aoff, boff; for (int j0 = jstart; j0 < jend ; j0++) { boff = nrows * j0; user = B[tid + boff]; for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) { aoff = nrows * Cir[j]; prod = A[tid + aoff] * user; sum = prod + __shfl_down(prod, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); __syncthreads(); if (threadIdx.x == threadIdx.y) { merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.y == 0) { sum = merge[threadIdx.x]; sum = sum + __shfl_down(sum, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.x == threadIdx.y) { sum = merge[threadIdx.x]; } bsum = __shfl(sum, threadIdx.y); float pval = nsamps / bsum; int cr = curand_poisson(&rstate, prod * pval); if (cr > 0) { atomicAdd(&AN[tid + aoff], cr); user += cr; } } B[tid + boff] = user; } } // // This version uses Poisson RNG to generate several random numbers per point, per iteration. // nrows is number of rows in models A and B. A is nrows * nfeats, B is nrows * nusers // AN and BN are updaters for A and B and hold sample counts from this iteration. // Cir anc Cic are row and column indices for the sparse matrix. // P holds the inner products (results of a call to dds) of A and B columns corresponding to Cir[j] and Cic[j] // nsamps is the expected number of samples to compute for this iteration - its just a multiplier // for individual poisson lambdas. 
// __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, curandState *rstates) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); curandState rstate = rstates[id]; for (int j = jstart; j < jend ; j++) { int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; float pval = nsamps / P[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { float prod = A[i + aoff] * B[i + boff]; int cr = curand_poisson(&rstate, prod * pval); if (cr > 0) { atomicAdd(&AN[i + aoff], cr); atomicAdd(&BN[i + boff], cr); } } } } __global__ void __randinit(curandState *rstates) { int id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); curand_init(1234, id, 0, &rstates[id]); } #else __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, curandState *) {} __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, curandState *) {} __global__ void __LDA_Gibbsy(int nrows, int nnz, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps, curandState *) {} __global__ void __randinit(curandState *rstates) {} #endif #else __global__ void __LDA_Gibbs1(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k, curandState *) {} __global__ void __LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps, curandState *) {} __global__ void __LDA_Gibbsy(int nrows, int nnz, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps, curandState *) {} __global__ void __randinit(curandState *rstates) {} #endif #define DDS_BLKY 32 int LDA_Gibbs1(int 
nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P, int *Ms, int *Us, int k) { int nblocks = min(1024, max(1,nnz/128)); curandState *rstates; int err; err = cudaMalloc(( void **)& rstates , k * nblocks * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaDeviceSynchronize(); __randinit<<<nblocks,k>>>(rstates); cudaDeviceSynchronize(); __LDA_Gibbs1<<<nblocks,32>>>(nrows, nnz, A, B, Cir, Cic, P, Ms, Us, k, rstates); cudaDeviceSynchronize(); cudaFree(rstates); err = cudaGetLastError(); return err; } int LDA_Gibbs(int nrows, int nnz, float *A, float *B, float *AN, float *BN, int *Cir, int *Cic, float *P, float nsamps) { dim3 blockDims(min(32,nrows), min(32, 1+(nrows-1)/64), 1); int nblocks = min(128, max(1,nnz/128)); curandState *rstates; int err; err = cudaMalloc(( void **)& rstates , nblocks * blockDims.x * blockDims.y * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaDeviceSynchronize(); __randinit<<<nblocks,blockDims>>>(rstates); cudaDeviceSynchronize(); __LDA_Gibbs<<<nblocks,blockDims>>>(nrows, nnz, A, B, AN, BN, Cir, Cic, P, nsamps, rstates); cudaDeviceSynchronize(); cudaFree(rstates); err = cudaGetLastError(); return err; } int LDA_Gibbsy(int nrows, int ncols, float *A, float *B, float *AN, int *Cir, int *Cic, float *P, float nsamps) { dim3 blockDims(32, 32); int nblocks = min(128, max(1,ncols/2)); curandState *rstates; int err; err = cudaMalloc(( void **)& rstates , nblocks * blockDims.x * blockDims.y * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaDeviceSynchronize(); __randinit<<<nblocks,blockDims>>>(rstates); cudaDeviceSynchronize(); __LDA_Gibbsy<<<nblocks,blockDims>>>(nrows, ncols, A, B, AN, Cir, Cic, P, nsamps, rstates); cudaDeviceSynchronize(); cudaFree(rstates); err = cudaGetLastError(); return err; }
b48058127c4e53910b299634accc7ec2b11f1474.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * * * Google */ int N = 10000; int *a; size_t size = N * sizeof(int); hipMallocManaged(&a, size); init(a, N); size_t threads_per_block = 2048; size_t number_of_blocks = 32; hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); hipDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); hipFree(a); }
b48058127c4e53910b299634accc7ec2b11f1474.cu
#include <stdio.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < N + stride; i += stride) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { /* * エラーの内容を確認して修正するために、このソース コードに * エラー処理を追加します。エラーの解決方法が不明な場合は * Google でエラー メッセージを検索してみてください。 */ int N = 10000; int *a; size_t size = N * sizeof(int); cudaMallocManaged(&a, size); init(a, N); size_t threads_per_block = 2048; size_t number_of_blocks = 32; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); cudaFree(a); }
7bd93078bb92fa1dea47058c91247a3064c6d71e.hip
// !!! This is a file automatically generated by hipify!!! ///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD ///(19/01/2017) ///Grafica en Matlab los tiempos de ejecucin, considerando Radix-3. N = 3^13, Li = {3, 9,,N} , Lo= N. (precisin doble). #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hipfft.h> #include <cufftw.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_complex.h> #include <math.h> #include <math_constants.h> #include <iostream> #include <time.h> ////////////////////////////////////////////////////////////////////////// ///////////////////////DECLARACIN DE FUNCIONES/////////////////////////// ////////////////////////////////////////////////////////////////////////// void vector_entrada_xn(int Li); void arreglo_W(int N); void asign_rap(int N,int Li,int Lo); void factor(int N); void product(int vector_1[500],int vector_2[500],int valor); void etapa_entrada(void); __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y); void etapa_intermedia(void); void etapa_salida(void); __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X); ////////////////////////////////////////////////////////////////////////// /////////////////////DECLARACIN DE VARIABLES GLOBALES//////////////////// ////////////////////////////////////////////////////////////////////////// hipDoubleComplex *x_host; hipDoubleComplex *W_host; //hipDoubleComplex *y_host; //hipDoubleComplex *z_host; hipDoubleComplex *X_host; hipDoubleComplex *x_device; hipDoubleComplex *W_device; hipDoubleComplex *y_device; hipDoubleComplex *z_device; hipDoubleComplex *X_device; hipfftDoubleComplex *in,*out; FILE *db_open,*dc_open; int Dip,Dop,P,N,Li,Lo; int vF[500]; //Almacena los factores de N int svF; //Almacena el numero de factores de N int Prod[500]; int a; #define inf 99999 
////////////////////////////////////////////////////////////////////////// //////////////////////////DATOS DE ENTRADA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// N >>> Nmero de elementos del vector de entrada /// Li >>> Nmero de elementos de entrada diferentes de cero /// Lo >>> Nmero de elementos de salida requeridos /// loop >>> Nmero de iteraciones /// muestras >>> Nmero de muestras ////////////////////////////////////////////////////////////////////////// ///////////////////////////DATOS DE SALIDA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// X >>> Vector de salida ////////////////////////////////////////////////////////////////////////// /////////////////// SE INGRESAN LOS DATOS DE ENTRADA ///////////////////// ////////////////////////////////////////////////////////////////////////// ///Ingrese el nmero de iteraciones requeridas const int loop = 300; ///Ingrese el valor de N_max const int N_max = 13; ///Ingrese el valor de Li_max const int Lo_max = 1594323; ////////////////////////////////////////////////////////////////////////// //////////////////////////FUNCION PRINCIPAL/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Funcin principal int main() { ////////////////////////////////////////////////////////////////////////// //////////////////////////SELECCIN DEL DEVICE//////////////////////////// ////////////////////////////////////////////////////////////////////////// int device; FILE *da; hipSetDevice(1); hipGetDevice(&device); if(device == 0) { printf("\n\n---DEVICE = GeForce GTX 970---\n\n"); da = fopen("Tiempos_N13_LiVARIA_LoN_CUDA_GTX970_DO.bin","a+b"); //Crea o sobre escribe archivo } if(device == 1) { printf("\n\n---DEVICE = TESLA K20---\n\n"); da = fopen("Tiempos_N13_LiVARIA_LoN_CUDA_TESLAK20c_DO.bin","a+b"); //Crea o sobre escribe archivo } 
////////////////////////////////////////////////////////////////////////// int i,j,i_N,j_res,k_res,cont,i_prom; float suma; float promedio[N_max]; //Pausa printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n"); getchar(); for(i_N = N_max;i_N <= N_max;i_N++) { N = (int )pow(3,i_N); printf("\n N = %d \n",N); for(j_res=Lo_max;j_res <= Lo_max;j_res++) { Lo=j_res; for(k_res=1;k_res <= N_max;k_res++) { Li=(int )pow(3,k_res); printf("\n Li = %d Lo = %d",Li,Lo); ///Se abre el archivo binario db_open = fopen("Entrada_real_N13_C.bin","rb"); dc_open = fopen("Entrada_imag_N13_C.bin","rb"); suma=0.0; for(j=0;j<loop;j++) { //Comandos necesarios para medir el tiempo float elapsedTime_app; hipEvent_t start_app, stop_app; hipEventCreate(&start_app); hipEventCreate(&stop_app); //Se generan en el host los valores del vector de entrada x[n] vector_entrada_xn(Li); ///Se genera el arreglo W[N] arreglo_W(N); //--------------------------------------------------------------------------------------------- //Se empieza a medir el tiempo de ejecucion de la aplicacion hipEventRecord(start_app,0); //Se generan en el host los factores Dip y Dop asign_rap(N,Li,Lo); //Clculo en el host del factor P P = N/(Dip*Dop); //printf("\n\n FACTOR P:\n\n"); //printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P); //Funcin auxiliar del host para ejecutar la etapa de entrada etapa_entrada(); //Funcin auxiliar del host para ejecutar la etapa intermedia etapa_intermedia(); //Funcin auxiliar del host para ejecutar la etapa de salida etapa_salida(); //--------------------------------------------------------------------------------------------- //Comandos necesarios para medir el tiempo de la aplicacion (app) hipEventRecord(stop_app,0); hipEventSynchronize(stop_app); hipEventElapsedTime(&elapsedTime_app,start_app,stop_app); //Suma de todos los tiempos suma = suma + elapsedTime_app; //Se destruyen los eventos que miden el tiempo de la aplicacion hipEventDestroy(start_app); hipEventDestroy(stop_app); //Se liberan 
memorias del Host y Device free(x_host); free(W_host); free(X_host); hipFree(x_device); hipFree(W_device); hipFree(y_device); hipFree(z_device); hipFree(X_device); } promedio[k_res-1] = suma/(float)loop; fclose(db_open); fclose(dc_open); } } } fwrite(promedio,sizeof(float),N_max,da); printf("\n\nTIEMPOS:\n\n"); int time_print; for(time_print = 0;time_print < N_max;time_print++) { printf("\nTime (%d)= %f ms",time_print,promedio[time_print]); } fclose(da); return EXIT_SUCCESS; } ////////////////////////////////////////////////////////////////////////// /////////////////////////FUNCIONES SECUNDARIAS//////////////////////////// ////////////////////////////////////////////////////////////////////////// //sta funcin genera el vector de entrada x[n] void vector_entrada_xn(int Li) { //Declaracin de variables locales int k; float *buffer_real,*buffer_imag; //Se reserva memoria para xn_host en el host x_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Li); buffer_real = (float*)malloc(sizeof(float)*N); buffer_imag = (float*)malloc(sizeof(float)*N); ///Se lee el vector de entrada del archivo binario fread(buffer_real,sizeof(float),N,db_open); fread(buffer_imag,sizeof(float),N,dc_open); //Se dan valores a x[n] for(k = 0;k < Li; k++) { //x_host[k] = make_cuFloatComplex((double)(rand()%11),(double)(rand()%11)); //x_host[k] = make_cuDoubleComplex((double)(k + 1),(double)(0.0)); x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]); } /* //Se imprimen los valores de entrada x[n] printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n"); for(k=0;k<Li;k++) { printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k])); } */ free(buffer_real); free(buffer_imag); } //sta funcin genera el arreglo W void arreglo_W(int N) { //Declaracin de variables locales int n; //Se reserva memoria para W_host en el host W_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*N); //Se genera el arreglo W for(n = 1;n <= N;n++) { W_host[n-1] = 
make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N)); } /* //Se imprimen los valores del arreglo W[N] printf("\n---ARREGLO W[N]---\n\n"); for(n = 0;n < N; n++) { printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n])); } */ } //sta funcin genera los factores Dip y Dop void asign_rap(int N,int Li,int Lo) { //Declaracin de variables locales float NLi,NLo,Diprapt,Doprapt; int Nh[500]; int k[500]; int G; int g,i,t,ta; int Dipt[500],Dopt[500]; float distrapt,distrap; int Pos,h,Poss; int nk[500]; int r; //Inicializaciones G = 0; svF = 0; //Factores Dip y Dop ideales NLi=(float)N/(float)Li; NLo=(float)N/(float)Lo; Diprapt=NLi; Doprapt=NLo; //Se encuentran los factores de "N" //vF almacena los factores de "N" //svF almacena el nmero de factores de "N" factor(N); /* Almacena en el vector Nh los factores que son diferentes de del vector vF En el vector k se almacena la cantidad de veces que se repite cada elemento almacenado en el vector Nh. */ Nh[0] = vF[0]; k[0]=1; for(g=1;g<=svF-1;g=g+1) { if(vF[g]!=vF[g-1]) { G=G+1; Nh[G]=vF[g]; k[G]=1; } else { k[G]=k[G]+1; } } /* Almacena en el vector Nh todas las posibles combinaciones que den como producto a N. t almacena el numero de elementos del vector Nh. 
*/ product(Nh,k,G); t = a; for(i=0;i<t;i=i+1) { Dipt[i]=Prod[i]; } distrapt=inf; for(g=1;g<=t;g=g+1) { if(Dipt[g-1]<=NLi) { Pos=g-1; for(h=0;h<=G;h=h+1) { Poss=floor(Pos/(k[h]+1)); nk[h]=k[h]+Poss*(k[h]+1)-Pos; Pos=Poss; } product(Nh,nk,G); ta=a; for(i=0;i<ta;i=i+1) { Dopt[i]=Prod[i]; } //////////////////////////////////////////// //int j; //for(j=0;j<ta;j++) //{ // printf(" %d ",Dopt[j]); //} //printf("\n\n ta=%d\n\n",ta); /////////////////////////////////////////// for(r=0;r<ta;r=r+1) { distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2)); if(distrap<distrapt) { distrapt=distrap; Dip=Dipt[g-1]; Dop=Dopt[r]; } } } } /* printf("\n\n FACTOR Dip :\n\n"); printf(" %d ",Dip); printf("\n\n FACTOR Dop:\n\n"); printf(" %d ",Dop); */ } //sta funcin encuentra los factores de "N" void factor(int N) { //Se empieza a verificar los factores desde 2 int i=2; long N_factor; N_factor = N; while(i<=N_factor) { while((N_factor%i)==0) { vF[svF]=i; N_factor=N_factor/i; // printf("Factores: %d ",vF[svF]); svF++; } i++; } } //sta funcin encuentra todas las posibles combinaciones de factores que den como resultado "N" void product(int vector_1[500],int vector_2[500],int valor) { int d,e,s,pNh,i; int cont=0; Prod[0]=1; a=1; for(d=0;d<=valor;d=d+1) { s=a; pNh=1; for(e=1;e<=vector_2[d];e=e+1) { pNh=pNh*vector_1[d]; for(i=(s*e+1);i<=(s*e+s);i=i+1) { Prod[i-1]=pNh*Prod[cont]; cont=cont+1; } a=a+s; cont=0; } } } //Funcin auxiliar del host para calcular la etapa de entrada en el device void etapa_entrada(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE ENTRADA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int k1,n1,n2; //Asignacin de memoria en el device para el arreglo "x_device" hipMalloc((void**)&x_device,Li*sizeof(hipDoubleComplex)); //Se reserva memoria en el device para el arreglo "W_device" 
hipMalloc((void**)&W_device,N*sizeof(hipDoubleComplex)); //Asignacin de memoria en el device para el arreglo "y" hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(hipDoubleComplex)); //Se pasa el arreglo x_host a x_device hipMemcpy(x_device,x_host,Li*sizeof(hipDoubleComplex),hipMemcpyHostToDevice); //Envo de los arreglos W hacia la memoria global del device hipMemcpy(W_device,W_host,N*sizeof(hipDoubleComplex),hipMemcpyHostToDevice); //Asignacin de memoria en el host para "y" //y_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop); //Dimensionamiento del grid para la funcin kernel "inputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((P*Dop) < 32 && (Dip) < 32) { blockDim.x = (P*Dop); blockDim.y = (Dip); gridDim.x = 1; gridDim.y = 1; } else { blockDim.x = 32; blockDim.y = 32; gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x)); gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y)); } //Lanzamiento del kernel "inputStage_kernel" hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); /* //Copia del arreglo "y" del device hacia el host hipMemcpy(y_host,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost); //Se imprimen los valores de "y" printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(n2 = 0;n2 < P;n2++) { printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //funcin kernel que ejecuta la etapa de entrada en el device __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y) { int n1,n2; hipDoubleComplex t1; //Threads int n = blockDim.x *blockIdx.x + threadIdx.x; int k1 = 
blockDim.y *blockIdx.y + threadIdx.y; //Se resetean las flags //flag_inputstage_1_d[0] = 0; //flag_inputstage_2_d[0] = 0; //flag_inputstage_3_d[0] = 0; //printf("\n n = %d k1 = %d",n,k1); if( (n < (P*Dop)) && (k1 < Dip)) { n2 = floorf(n/Dop); n1 = n - (Dop*n2); //Generacin de los elementos que dependen de x[0] if(n == 0) { y[(k1*Dop*P)+(0*P)+ 0] = x[0]; ///Flag //flag_inputstage_1_d[0] = 1; } //Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's if((n >= 1) && (n <= (Li-1))) { t1 = x[n]; if(k1 == 0) { y[(0*Dop*P)+(n1*P)+ n2] = t1; } if(k1 >= 1) { y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1); } ///Flag //flag_inputstage_2_d[0] = 1; } //Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1 if((n >= Li) && (n <= (P*Dop)-1)) { y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0); ///Flag //flag_inputstage_3_d[0] = 1; } //printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2])); } } //Funcin auxiliar del host para calcular la etapa intermedia en el device void etapa_intermedia(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA INTERMEDIA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int k1,k2,n1; int n[1] = {P}; int inembed[1] = {P}; int onembed[1] = {P}; //Asignacin de memoria en el device para "z" hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(hipDoubleComplex)); //Asignacin de memoria en el host para "z" //z_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop); //Asignacin de memoria en el device para "in" y "out" hipMalloc((void**)&in,sizeof(hipfftDoubleComplex)*P*Dip*Dop); hipMalloc((void**)&out,sizeof(hipfftDoubleComplex)*P*Dip*Dop); //Se copia el arreglo "y" al arreglo "in" hipMemcpy(in,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice); //Se crea un plan hipfftHandle plan; 
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_Z2Z,Dip*Dop); //Ejecucin del plan hipfftExecZ2Z(plan,in,out,HIPFFT_FORWARD); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); //Se copian los datos del arreglo "out" al arreglo "z_device" hipMemcpy(z_device,out,sizeof(hipfftDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice); //Se destruye el plan hipfftDestroy(plan); //Se liberan los arreglos "in" y "out" hipFree(in); hipFree(out); /* //Se copian los datos del arreglo "z_device" al arreglo "z_host" hipMemcpy(z_host,z_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost); ///Se imprimen los valores de z(n1,k2,k1) printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(k2 = 0;k2 < P;k2++) { printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //Funcin auxiliar del host para calcular la etapa de salida en el device void etapa_salida(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE SALIDA/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaracin de variables locales int m; //Asignacin de memoria en el device para "X" hipMalloc((void**)&X_device,Lo*sizeof(hipDoubleComplex)); //Asignacin de memoria en el host para "X" X_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Lo); //Dimensionamiento del grid para la funcin kernel "outputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((Lo) < 1024) { blockDim.x = Lo; gridDim.x = 1; } else { blockDim.x = 1024; gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x)); } //Lanzamiento del kernel "outputStage_kernel" hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, 
N,Lo,Dip,Dop,P,z_device,W_device,X_device); //Esperar que el kernel termine de ejecutarse totalmente hipDeviceSynchronize(); //Copia del arreglo "X" del device hacia el host hipMemcpy(X_host,X_device,sizeof(hipDoubleComplex)*Lo,hipMemcpyDeviceToHost); /* //Se imprimen los valores de "X_host" ///Imprimir X[k] printf("\n\n--- ARREGLO X[k] ---\n\n"); for(m=0;m<=Lo-1;m++) { printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m])); //fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i])); } */ } //funcin kernel que ejecuta la etapa de salida en el device __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X) { //Declaracin de variables locales int n1,k_aux,k1,k2,a,b; hipDoubleComplex t1,t2,t3,t4,t5; //Threads int k = blockDim.x *blockIdx.x + threadIdx.x; //Se resetean las flags //flag_outputstage_1_d[0] = 0; //flag_outputstage_2_d[0] = 0; //flag_outputstage_3_d[0] = 0; if(k < Lo) { for(n1 = 0; n1 <= (Dop-1); n1 = n1+1) { if(Lo <= Dip) { //Clculo de X(k) para 0<=k<=Lo-1. //printf("\n--- Caso (Lo <= Dip) ---\n"); //En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; ///Flag //flag_outputstage_1_d[0] = 1; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]); ///Flag //flag_outputstage_1_d[0] = 1; } } else { if((k >= 0) && (k <= (Dip-1))) { //Clculo de X(k) para 0<=k<=Dip-1. 
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]); } } else { if(Dop <= 4) { //Usando el mtodo directo //printf("\n--- Caso (Metodo directo) ---\n"); if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; //printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1); ///Flag //flag_outputstage_2_d[0] = 1; } else { if(n1 == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; } a = floorf(k/(Dip*P)); X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1])); ///Flag //flag_outputstage_2_d[0] = 1; } } else { //Usando el mtodo filtering 2BF //printf("\n--- Caso (Filtro 2BF) ---\n"); if((Dop-2) >= 1) { if(n1 == 0) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); /* if(k == 256) { printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P)); } */ ///Flag //flag_outputstage_3_d[0] = 1; } if((n1 >= 1) && (n1 <= (Dop-2))) { t2 = t1; t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4); t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); t4 = cuCsub(t3,t2); /* if(k == 256) { printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1); } */ } if(n1 == (Dop-1)) { t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4); X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1]))); } } else { if(Dop == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = 
k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; X[k] = t1; ///Flag //flag_outputstage_3_d[0] = 1; } else { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1]))); ///Flag //flag_outputstage_3_d[0] = 1; } } } } } } } }
7bd93078bb92fa1dea47058c91247a3064c6d71e.cu
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD ///(19/01/2017) ///Grafica en Matlab los tiempos de ejecución, considerando Radix-3. N = 3^13, Li = {3, 9,…,N} , Lo= N. (precisión doble). #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cufft.h> #include <cufftw.h> #include <stdio.h> #include <stdlib.h> #include <cuComplex.h> #include <math.h> #include <math_constants.h> #include <iostream> #include <time.h> ////////////////////////////////////////////////////////////////////////// ///////////////////////DECLARACIÓN DE FUNCIONES/////////////////////////// ////////////////////////////////////////////////////////////////////////// void vector_entrada_xn(int Li); void arreglo_W(int N); void asign_rap(int N,int Li,int Lo); void factor(int N); void product(int vector_1[500],int vector_2[500],int valor); void etapa_entrada(void); __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y); void etapa_intermedia(void); void etapa_salida(void); __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X); ////////////////////////////////////////////////////////////////////////// /////////////////////DECLARACIÓN DE VARIABLES GLOBALES//////////////////// ////////////////////////////////////////////////////////////////////////// cuDoubleComplex *x_host; cuDoubleComplex *W_host; //cuDoubleComplex *y_host; //cuDoubleComplex *z_host; cuDoubleComplex *X_host; cuDoubleComplex *x_device; cuDoubleComplex *W_device; cuDoubleComplex *y_device; cuDoubleComplex *z_device; cuDoubleComplex *X_device; cufftDoubleComplex *in,*out; FILE *db_open,*dc_open; int Dip,Dop,P,N,Li,Lo; int vF[500]; //Almacena los factores de N int svF; //Almacena el numero de factores de N int Prod[500]; int a; #define inf 99999 ////////////////////////////////////////////////////////////////////////// 
//////////////////////////DATOS DE ENTRADA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// N >>> Número de elementos del vector de entrada /// Li >>> Número de elementos de entrada diferentes de cero /// Lo >>> Número de elementos de salida requeridos /// loop >>> Número de iteraciones /// muestras >>> Número de muestras ////////////////////////////////////////////////////////////////////////// ///////////////////////////DATOS DE SALIDA//////////////////////////////// ////////////////////////////////////////////////////////////////////////// /// X >>> Vector de salida ////////////////////////////////////////////////////////////////////////// /////////////////// SE INGRESAN LOS DATOS DE ENTRADA ///////////////////// ////////////////////////////////////////////////////////////////////////// ///Ingrese el número de iteraciones requeridas const int loop = 300; ///Ingrese el valor de N_max const int N_max = 13; ///Ingrese el valor de Li_max const int Lo_max = 1594323; ////////////////////////////////////////////////////////////////////////// //////////////////////////FUNCION PRINCIPAL/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Función principal int main() { ////////////////////////////////////////////////////////////////////////// //////////////////////////SELECCIÓN DEL DEVICE//////////////////////////// ////////////////////////////////////////////////////////////////////////// int device; FILE *da; cudaSetDevice(1); cudaGetDevice(&device); if(device == 0) { printf("\n\n---DEVICE = GeForce GTX 970---\n\n"); da = fopen("Tiempos_N13_LiVARIA_LoN_CUDA_GTX970_DO.bin","a+b"); //Crea o sobre escribe archivo } if(device == 1) { printf("\n\n---DEVICE = TESLA K20---\n\n"); da = fopen("Tiempos_N13_LiVARIA_LoN_CUDA_TESLAK20c_DO.bin","a+b"); //Crea o sobre escribe archivo } ////////////////////////////////////////////////////////////////////////// int 
i,j,i_N,j_res,k_res,cont,i_prom; float suma; float promedio[N_max]; //Pausa printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n"); getchar(); for(i_N = N_max;i_N <= N_max;i_N++) { N = (int )pow(3,i_N); printf("\n N = %d \n",N); for(j_res=Lo_max;j_res <= Lo_max;j_res++) { Lo=j_res; for(k_res=1;k_res <= N_max;k_res++) { Li=(int )pow(3,k_res); printf("\n Li = %d Lo = %d",Li,Lo); ///Se abre el archivo binario db_open = fopen("Entrada_real_N13_C.bin","rb"); dc_open = fopen("Entrada_imag_N13_C.bin","rb"); suma=0.0; for(j=0;j<loop;j++) { //Comandos necesarios para medir el tiempo float elapsedTime_app; cudaEvent_t start_app, stop_app; cudaEventCreate(&start_app); cudaEventCreate(&stop_app); //Se generan en el host los valores del vector de entrada x[n] vector_entrada_xn(Li); ///Se genera el arreglo W[N] arreglo_W(N); //--------------------------------------------------------------------------------------------- //Se empieza a medir el tiempo de ejecucion de la aplicacion cudaEventRecord(start_app,0); //Se generan en el host los factores Dip y Dop asign_rap(N,Li,Lo); //Cálculo en el host del factor P P = N/(Dip*Dop); //printf("\n\n FACTOR P:\n\n"); //printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P); //Función auxiliar del host para ejecutar la etapa de entrada etapa_entrada(); //Función auxiliar del host para ejecutar la etapa intermedia etapa_intermedia(); //Función auxiliar del host para ejecutar la etapa de salida etapa_salida(); //--------------------------------------------------------------------------------------------- //Comandos necesarios para medir el tiempo de la aplicacion (app) cudaEventRecord(stop_app,0); cudaEventSynchronize(stop_app); cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app); //Suma de todos los tiempos suma = suma + elapsedTime_app; //Se destruyen los eventos que miden el tiempo de la aplicacion cudaEventDestroy(start_app); cudaEventDestroy(stop_app); //Se liberan memorias del Host y Device free(x_host); free(W_host); free(X_host); 
cudaFree(x_device); cudaFree(W_device); cudaFree(y_device); cudaFree(z_device); cudaFree(X_device); } promedio[k_res-1] = suma/(float)loop; fclose(db_open); fclose(dc_open); } } } fwrite(promedio,sizeof(float),N_max,da); printf("\n\nTIEMPOS:\n\n"); int time_print; for(time_print = 0;time_print < N_max;time_print++) { printf("\nTime (%d)= %f ms",time_print,promedio[time_print]); } fclose(da); return EXIT_SUCCESS; } ////////////////////////////////////////////////////////////////////////// /////////////////////////FUNCIONES SECUNDARIAS//////////////////////////// ////////////////////////////////////////////////////////////////////////// //Ésta función genera el vector de entrada x[n] void vector_entrada_xn(int Li) { //Declaración de variables locales int k; float *buffer_real,*buffer_imag; //Se reserva memoria para xn_host en el host x_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Li); buffer_real = (float*)malloc(sizeof(float)*N); buffer_imag = (float*)malloc(sizeof(float)*N); ///Se lee el vector de entrada del archivo binario fread(buffer_real,sizeof(float),N,db_open); fread(buffer_imag,sizeof(float),N,dc_open); //Se dan valores a x[n] for(k = 0;k < Li; k++) { //x_host[k] = make_cuFloatComplex((double)(rand()%11),(double)(rand()%11)); //x_host[k] = make_cuDoubleComplex((double)(k + 1),(double)(0.0)); x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]); } /* //Se imprimen los valores de entrada x[n] printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n"); for(k=0;k<Li;k++) { printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k])); } */ free(buffer_real); free(buffer_imag); } //Ésta función genera el arreglo W void arreglo_W(int N) { //Declaración de variables locales int n; //Se reserva memoria para W_host en el host W_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*N); //Se genera el arreglo W for(n = 1;n <= N;n++) { W_host[n-1] = 
make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N)); } /* //Se imprimen los valores del arreglo W[N] printf("\n---ARREGLO W[N]---\n\n"); for(n = 0;n < N; n++) { printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n])); } */ } //Ésta función genera los factores Dip y Dop void asign_rap(int N,int Li,int Lo) { //Declaración de variables locales float NLi,NLo,Diprapt,Doprapt; int Nh[500]; int k[500]; int G; int g,i,t,ta; int Dipt[500],Dopt[500]; float distrapt,distrap; int Pos,h,Poss; int nk[500]; int r; //Inicializaciones G = 0; svF = 0; //Factores Dip y Dop ideales NLi=(float)N/(float)Li; NLo=(float)N/(float)Lo; Diprapt=NLi; Doprapt=NLo; //Se encuentran los factores de "N" //vF almacena los factores de "N" //svF almacena el número de factores de "N" factor(N); /* Almacena en el vector Nh los factores que son diferentes de del vector vF En el vector k se almacena la cantidad de veces que se repite cada elemento almacenado en el vector Nh. */ Nh[0] = vF[0]; k[0]=1; for(g=1;g<=svF-1;g=g+1) { if(vF[g]!=vF[g-1]) { G=G+1; Nh[G]=vF[g]; k[G]=1; } else { k[G]=k[G]+1; } } /* Almacena en el vector Nh todas las posibles combinaciones que den como producto a N. t almacena el numero de elementos del vector Nh. 
*/ product(Nh,k,G); t = a; for(i=0;i<t;i=i+1) { Dipt[i]=Prod[i]; } distrapt=inf; for(g=1;g<=t;g=g+1) { if(Dipt[g-1]<=NLi) { Pos=g-1; for(h=0;h<=G;h=h+1) { Poss=floor(Pos/(k[h]+1)); nk[h]=k[h]+Poss*(k[h]+1)-Pos; Pos=Poss; } product(Nh,nk,G); ta=a; for(i=0;i<ta;i=i+1) { Dopt[i]=Prod[i]; } //////////////////////////////////////////// //int j; //for(j=0;j<ta;j++) //{ // printf(" %d ",Dopt[j]); //} //printf("\n\n ta=%d\n\n",ta); /////////////////////////////////////////// for(r=0;r<ta;r=r+1) { distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2)); if(distrap<distrapt) { distrapt=distrap; Dip=Dipt[g-1]; Dop=Dopt[r]; } } } } /* printf("\n\n FACTOR Dip :\n\n"); printf(" %d ",Dip); printf("\n\n FACTOR Dop:\n\n"); printf(" %d ",Dop); */ } //Ésta función encuentra los factores de "N" void factor(int N) { //Se empieza a verificar los factores desde 2 int i=2; long N_factor; N_factor = N; while(i<=N_factor) { while((N_factor%i)==0) { vF[svF]=i; N_factor=N_factor/i; // printf("Factores: %d ",vF[svF]); svF++; } i++; } } //Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N" void product(int vector_1[500],int vector_2[500],int valor) { int d,e,s,pNh,i; int cont=0; Prod[0]=1; a=1; for(d=0;d<=valor;d=d+1) { s=a; pNh=1; for(e=1;e<=vector_2[d];e=e+1) { pNh=pNh*vector_1[d]; for(i=(s*e+1);i<=(s*e+s);i=i+1) { Prod[i-1]=pNh*Prod[cont]; cont=cont+1; } a=a+s; cont=0; } } } //Función auxiliar del host para calcular la etapa de entrada en el device void etapa_entrada(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE ENTRADA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int k1,n1,n2; //Asignación de memoria en el device para el arreglo "x_device" cudaMalloc((void**)&x_device,Li*sizeof(cuDoubleComplex)); //Se reserva memoria en el device para el arreglo "W_device" 
cudaMalloc((void**)&W_device,N*sizeof(cuDoubleComplex)); //Asignación de memoria en el device para el arreglo "y" cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuDoubleComplex)); //Se pasa el arreglo x_host a x_device cudaMemcpy(x_device,x_host,Li*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); //Envío de los arreglos W hacia la memoria global del device cudaMemcpy(W_device,W_host,N*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); //Asignación de memoria en el host para "y" //y_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop); //Dimensionamiento del grid para la función kernel "inputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((P*Dop) < 32 && (Dip) < 32) { blockDim.x = (P*Dop); blockDim.y = (Dip); gridDim.x = 1; gridDim.y = 1; } else { blockDim.x = 32; blockDim.y = 32; gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x)); gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y)); } //Lanzamiento del kernel "inputStage_kernel" inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device); //Esperar que el kernel termine de ejecutarse totalmente cudaDeviceSynchronize(); /* //Copia del arreglo "y" del device hacia el host cudaMemcpy(y_host,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost); //Se imprimen los valores de "y" printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(n2 = 0;n2 < P;n2++) { printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //función kernel que ejecuta la etapa de entrada en el device __global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y) { int n1,n2; cuDoubleComplex t1; //Threads int n = blockDim.x *blockIdx.x + threadIdx.x; int k1 = blockDim.y *blockIdx.y + threadIdx.y; //Se 
resetean las flags //flag_inputstage_1_d[0] = 0; //flag_inputstage_2_d[0] = 0; //flag_inputstage_3_d[0] = 0; //printf("\n n = %d k1 = %d",n,k1); if( (n < (P*Dop)) && (k1 < Dip)) { n2 = floorf(n/Dop); n1 = n - (Dop*n2); //Generación de los elementos que dependen de x[0] if(n == 0) { y[(k1*Dop*P)+(0*P)+ 0] = x[0]; ///Flag //flag_inputstage_1_d[0] = 1; } //Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's if((n >= 1) && (n <= (Li-1))) { t1 = x[n]; if(k1 == 0) { y[(0*Dop*P)+(n1*P)+ n2] = t1; } if(k1 >= 1) { y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1); } ///Flag //flag_inputstage_2_d[0] = 1; } //Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1 if((n >= Li) && (n <= (P*Dop)-1)) { y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0); ///Flag //flag_inputstage_3_d[0] = 1; } //printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2])); } } //Función auxiliar del host para calcular la etapa intermedia en el device void etapa_intermedia(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA INTERMEDIA////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int k1,k2,n1; int n[1] = {P}; int inembed[1] = {P}; int onembed[1] = {P}; //Asignación de memoria en el device para "z" cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuDoubleComplex)); //Asignación de memoria en el host para "z" //z_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop); //Asignación de memoria en el device para "in" y "out" cudaMalloc((void**)&in,sizeof(cufftDoubleComplex)*P*Dip*Dop); cudaMalloc((void**)&out,sizeof(cufftDoubleComplex)*P*Dip*Dop); //Se copia el arreglo "y" al arreglo "in" cudaMemcpy(in,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice); //Se crea un plan cufftHandle plan; 
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_Z2Z,Dip*Dop); //Ejecución del plan cufftExecZ2Z(plan,in,out,CUFFT_FORWARD); //Esperar que el kernel termine de ejecutarse totalmente cudaDeviceSynchronize(); //Se copian los datos del arreglo "out" al arreglo "z_device" cudaMemcpy(z_device,out,sizeof(cufftDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice); //Se destruye el plan cufftDestroy(plan); //Se liberan los arreglos "in" y "out" cudaFree(in); cudaFree(out); /* //Se copian los datos del arreglo "z_device" al arreglo "z_host" cudaMemcpy(z_host,z_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost); ///Se imprimen los valores de z(n1,k2,k1) printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n"); for(k1 = 0;k1 < Dip;k1++) { for(n1 = 0;n1 < Dop;n1++) { for(k2 = 0;k2 < P;k2++) { printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2])); } printf("\n"); } printf("\n\n"); } printf("\n"); */ } //Función auxiliar del host para calcular la etapa de salida en el device void etapa_salida(void) { ////////////////////////////////////////////////////////////////////////// ////////////////////////////ETAPA DE SALIDA/////////////////////////////// ////////////////////////////////////////////////////////////////////////// //Declaración de variables locales int m; //Asignación de memoria en el device para "X" cudaMalloc((void**)&X_device,Lo*sizeof(cuDoubleComplex)); //Asignación de memoria en el host para "X" X_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Lo); //Dimensionamiento del grid para la función kernel "outputStage" //Dimensionamiento del Grid dim3 gridDim(1,1,1); //Dimensionamiento del block dim3 blockDim(1,1,1); if((Lo) < 1024) { blockDim.x = Lo; gridDim.x = 1; } else { blockDim.x = 1024; gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x)); } //Lanzamiento del kernel "outputStage_kernel" outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device); //Esperar que el kernel 
termine de ejecutarse totalmente cudaDeviceSynchronize(); //Copia del arreglo "X" del device hacia el host cudaMemcpy(X_host,X_device,sizeof(cuDoubleComplex)*Lo,cudaMemcpyDeviceToHost); /* //Se imprimen los valores de "X_host" ///Imprimir X[k] printf("\n\n--- ARREGLO X[k] ---\n\n"); for(m=0;m<=Lo-1;m++) { printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m])); //fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i])); } */ } //función kernel que ejecuta la etapa de salida en el device __global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X) { //Declaración de variables locales int n1,k_aux,k1,k2,a,b; cuDoubleComplex t1,t2,t3,t4,t5; //Threads int k = blockDim.x *blockIdx.x + threadIdx.x; //Se resetean las flags //flag_outputstage_1_d[0] = 0; //flag_outputstage_2_d[0] = 0; //flag_outputstage_3_d[0] = 0; if(k < Lo) { for(n1 = 0; n1 <= (Dop-1); n1 = n1+1) { if(Lo <= Dip) { //Cálculo de X(k) para 0<=k<=Lo-1. //printf("\n--- Caso (Lo <= Dip) ---\n"); //En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; ///Flag //flag_outputstage_1_d[0] = 1; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]); ///Flag //flag_outputstage_1_d[0] = 1; } } else { if((k >= 0) && (k <= (Dip-1))) { //Cálculo de X(k) para 0<=k<=Dip-1. 
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1 if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { X[k] = z[(k*Dop*P)+(0*P) + 0]; } else { if(n1 == 1) { X[k] = z[(k*Dop*P)+(0*P) + 0]; } X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]); } } else { if(Dop <= 4) { //Usando el método directo //printf("\n--- Caso (Metodo directo) ---\n"); if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; //printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1); ///Flag //flag_outputstage_2_d[0] = 1; } else { if(n1 == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)]; } a = floorf(k/(Dip*P)); X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1])); ///Flag //flag_outputstage_2_d[0] = 1; } } else { //Usando el método filtering 2BF //printf("\n--- Caso (Filtro 2BF) ---\n"); if((Dop-2) >= 1) { if(n1 == 0) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); /* if(k == 256) { printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P)); } */ ///Flag //flag_outputstage_3_d[0] = 1; } if((n1 >= 1) && (n1 <= (Dop-2))) { t2 = t1; t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4); t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); t4 = cuCsub(t3,t2); /* if(k == 256) { printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1); } */ } if(n1 == (Dop-1)) { t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4); X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1]))); } } else { if(Dop == 1) { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = 
k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; X[k] = t1; ///Flag //flag_outputstage_3_d[0] = 1; } else { k_aux = k-((Dip*P)*floorf(k/(Dip*P))); k2 = floorf(k_aux/Dip); k1 = k_aux-(Dip*k2); t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)]; b = floorf(k/(Dip*P)); t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0)); t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4); X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1]))); ///Flag //flag_outputstage_3_d[0] = 1; } } } } } } } }
650a07541a4844666ec6dce1a282e4ad97a2339a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_cosh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_cosh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_cosh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_cosh), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; 
} }}
650a07541a4844666ec6dce1a282e4ad97a2339a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_cosh.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_cosh<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_cosh<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_cosh<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bc6a5e11a1b04013622c598924a41aeb0fc960fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <type_traits> #include <algorithm> #include "cudf.h" #include "dlpack/dlpack.h" #include "tests/utilities/cudf_test_fixtures.h" #include "tests/utilities/column_wrapper.cuh" template <class TestParameters> struct DLPackTypedTest : public GdfTest { using TestParam = TestParameters; }; struct DLPackTest : public GdfTest { }; using Types = testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>; TYPED_TEST_CASE(DLPackTypedTest, Types); namespace{ static inline size_t tensor_size(const DLTensor& t) { size_t size = 1; for (int i = 0; i < t.ndim; ++i) size *= t.shape[i]; size *= (t.dtype.bits * t.dtype.lanes + 7) / 8; return size; } template <typename T> DLDataType get_DLDataType() { DLDataType type; if (std::is_integral<T>::value) { if (std::is_signed<T>::value) type.code = kDLInt; else type.code = kDLUInt; } else if (std::is_floating_point<T>::value) type.code = kDLFloat; else type.code = 3U; // error! 
type.bits = sizeof(T) * 8; type.lanes = 1; return type; } void deleter(DLManagedTensor * arg) { if (arg->dl_tensor.ctx.device_type == kDLGPU) RMM_FREE(arg->dl_tensor.data, 0); else if (arg->dl_tensor.ctx.device_type == kDLCPUPinned) hipFree(arg->dl_tensor.data); else free(arg->dl_tensor.data); delete [] arg->dl_tensor.shape; delete [] arg->dl_tensor.strides; delete arg; } template <typename T> __global__ void foo(T *in, int size) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) in[i] = i; } template <typename T> DLManagedTensor* create_DLTensor(gdf_size_type ncols, gdf_size_type nrows, DLDeviceType device_type = kDLGPU) { DLManagedTensor *mng_tensor = new DLManagedTensor; DLTensor &tensor = mng_tensor->dl_tensor; tensor.data = 0; tensor.ndim = (ncols > 1) ? 2 : 1; tensor.dtype = get_DLDataType<T>(); if (tensor.dtype.code > kDLFloat) return nullptr; tensor.shape = new int64_t[tensor.ndim]; tensor.shape[0] = nrows; if (tensor.ndim > 1) tensor.shape[1] = ncols; tensor.strides = nullptr; tensor.byte_offset = 0; tensor.ctx.device_id = 0; tensor.ctx.device_type = device_type; T *data = nullptr; const size_t N = nrows * ncols; size_t bytesize = tensor_size(mng_tensor->dl_tensor); T *init = new T[N]; for (gdf_size_type c = 0; c < ncols; ++c) for (gdf_size_type i = 0; i < nrows; ++i) init[c*nrows + i] = i; if (kDLGPU == device_type) { EXPECT_EQ(RMM_ALLOC(&data, bytesize, 0), RMM_SUCCESS); hipMemcpy(data, init, bytesize, hipMemcpyDefault); } else { data = static_cast<T*>(malloc(bytesize)); memcpy(data, init, bytesize); } delete [] init; EXPECT_NE(data, nullptr); if (data == nullptr) return nullptr; tensor.data = data; mng_tensor->manager_ctx = nullptr; mng_tensor->deleter = deleter; return mng_tensor; } } TEST_F(DLPackTest, InvalidDeviceType) { using T = int32_t; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length, kDLCPU); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int 
num_columns = 0; // We support kDLGPU, kDLCPU, and kDLCPUPinned for (int i = kDLOpenCL; i <= kDLExtDev; i++) { mng_tensor->dl_tensor.ctx.device_type = static_cast<DLDeviceType>(i); ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_INVALID_API_CALL); } EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, InvalidDevice) { using T = int32_t; constexpr int64_t length = 100; int device_id = 0; ASSERT_EQ(hipGetDevice(&device_id), hipSuccess); DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); // spoof the wrong device ID mng_tensor->dl_tensor.ctx.device_id = device_id + 1; gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_INVALID_API_CALL); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, UnsupportedDimensions) { using T = int32_t; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(2, length); gdf_column *columns = nullptr; int num_columns = 0; // too many dimensions mng_tensor->dl_tensor.ndim = 3; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_NOTIMPLEMENTED_ERROR); mng_tensor->dl_tensor.ndim = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_DATASET_EMPTY); mng_tensor->dl_tensor.ndim = 1; mng_tensor->dl_tensor.shape[0] = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_DATASET_EMPTY); mng_tensor->dl_tensor.ndim = 1; mng_tensor->dl_tensor.shape[0] = std::numeric_limits<gdf_size_type>::max(); ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_COLUMN_SIZE_TOO_BIG); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, UnsupportedDataType) { using T = uint32_t; // unsigned types not supported yet constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); gdf_column *columns = nullptr; int num_columns = 
0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_UNSUPPORTED_DTYPE); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, ToDLPack_EmptyDataset) { ASSERT_EQ(gdf_to_dlpack(nullptr, nullptr, 1), GDF_DATASET_EMPTY); DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, nullptr, 1), GDF_DATASET_EMPTY); gdf_column **columns = new gdf_column*[2]; ASSERT_EQ(gdf_to_dlpack(nullptr, columns, 0), GDF_DATASET_EMPTY); ASSERT_EQ(gdf_to_dlpack(tensor, columns, 0), GDF_DATASET_EMPTY); columns[0] = new gdf_column; columns[0]->dtype = GDF_FLOAT32; columns[0]->size = 0; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_DATASET_EMPTY); delete tensor; delete columns[0]; delete [] columns; } TEST_F(DLPackTest, ToDLPack_ColumnMismatch) { gdf_column **columns = new gdf_column*[2]; columns[0] = new gdf_column; columns[1] = new gdf_column; columns[0]->size = columns[1]->size = 1; columns[0]->dtype = GDF_INT32; columns[1]->dtype = GDF_FLOAT32; DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 2), GDF_DTYPE_MISMATCH); columns[1]->dtype = GDF_INT32; columns[1]->size = 2; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 2), GDF_COLUMN_SIZE_MISMATCH); delete tensor; delete columns[0]; delete columns[1]; delete [] columns; } TEST_F(DLPackTest, ToDLPack_NonNumerical) { gdf_column **columns = new gdf_column*[1]; columns[0] = new gdf_column; columns[0]->size = 1; DLManagedTensor *tensor = new DLManagedTensor; // all non-numeric gdf_dtype enums results in GDF_UNSUPPORTED_TYPE columns[0]->dtype = GDF_invalid; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_DATE32; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_DATE64; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_TIMESTAMP; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); 
columns[0]->dtype = GDF_CATEGORY; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); delete tensor; delete columns[0]; delete [] columns; } TYPED_TEST(DLPackTypedTest, FromDLPack_SingleColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_SUCCESS); ASSERT_NE(columns, nullptr); // We currently only support 1D Tensors ASSERT_EQ(num_columns, 1); ASSERT_EQ(columns[0].size, length); T *output = new T[length]; hipMemcpy(output, columns[0].data, length * sizeof(T), hipMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; gdf_column_free(&columns[0]); delete [] columns; } TYPED_TEST(DLPackTypedTest, FromDLPack_MultiColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; constexpr int64_t width = 3; DLManagedTensor *mng_tensor = create_DLTensor<T>(width, length); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_SUCCESS); ASSERT_NE(columns, nullptr); ASSERT_EQ(num_columns, width); for (int64_t c = 0; c < num_columns; ++c) { ASSERT_EQ(columns[c].size, length); T *output = new T[length]; hipMemcpy(output, columns[c].data, length * sizeof(T), hipMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; gdf_column_free(&columns[c]); } delete [] columns; } TYPED_TEST(DLPackTypedTest, ToDLPack_SingleColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; cudf::test::column_wrapper<T> col0(length, [](gdf_index_type i) { return i; }, [](gdf_index_type i) { return true; }); gdf_column **columns = new gdf_column*[1]; columns[0] = col0.get(); DLManagedTensor 
*tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_SUCCESS); ASSERT_EQ(tensor->dl_tensor.ndim, 1); ASSERT_EQ(tensor->dl_tensor.shape[0], length); T *output = new T[length]; hipMemcpy(output, tensor->dl_tensor.data, length * sizeof(T), hipMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; tensor->deleter(tensor); delete [] columns; } TYPED_TEST(DLPackTypedTest, ToDLPack_MultiColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; constexpr int64_t width = 3; cudf::test::column_wrapper<T>* cols[width]; gdf_column *columns[width]; for (int64_t c = 0; c < width; c++) { cols[c] = new cudf::test::column_wrapper<T>(length, [c](gdf_index_type i) { return i*(c+1); }, [](gdf_index_type i) { return true; }); columns[c] = cols[c]->get(); } DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, width), GDF_SUCCESS); ASSERT_EQ(tensor->dl_tensor.ndim, 2); ASSERT_EQ(tensor->dl_tensor.shape[0], length); ASSERT_EQ(tensor->dl_tensor.shape[1], width); T *output = new T[tensor_size(tensor->dl_tensor)/sizeof(T)]; hipMemcpy(output, tensor->dl_tensor.data, width * length * sizeof(T), hipMemcpyDefault); for (int64_t c = 0; c < width; c++) { T *o = &output[c * length]; for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i*(c+1)), o[i]); } delete [] output; tensor->deleter(tensor); for (int64_t c = 0; c < width; c++) delete cols[c]; }
bc6a5e11a1b04013622c598924a41aeb0fc960fc.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <type_traits> #include <algorithm> #include "cudf.h" #include "dlpack/dlpack.h" #include "tests/utilities/cudf_test_fixtures.h" #include "tests/utilities/column_wrapper.cuh" template <class TestParameters> struct DLPackTypedTest : public GdfTest { using TestParam = TestParameters; }; struct DLPackTest : public GdfTest { }; using Types = testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>; TYPED_TEST_CASE(DLPackTypedTest, Types); namespace{ static inline size_t tensor_size(const DLTensor& t) { size_t size = 1; for (int i = 0; i < t.ndim; ++i) size *= t.shape[i]; size *= (t.dtype.bits * t.dtype.lanes + 7) / 8; return size; } template <typename T> DLDataType get_DLDataType() { DLDataType type; if (std::is_integral<T>::value) { if (std::is_signed<T>::value) type.code = kDLInt; else type.code = kDLUInt; } else if (std::is_floating_point<T>::value) type.code = kDLFloat; else type.code = 3U; // error! 
type.bits = sizeof(T) * 8; type.lanes = 1; return type; } void deleter(DLManagedTensor * arg) { if (arg->dl_tensor.ctx.device_type == kDLGPU) RMM_FREE(arg->dl_tensor.data, 0); else if (arg->dl_tensor.ctx.device_type == kDLCPUPinned) cudaFree(arg->dl_tensor.data); else free(arg->dl_tensor.data); delete [] arg->dl_tensor.shape; delete [] arg->dl_tensor.strides; delete arg; } template <typename T> __global__ void foo(T *in, int size) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) in[i] = i; } template <typename T> DLManagedTensor* create_DLTensor(gdf_size_type ncols, gdf_size_type nrows, DLDeviceType device_type = kDLGPU) { DLManagedTensor *mng_tensor = new DLManagedTensor; DLTensor &tensor = mng_tensor->dl_tensor; tensor.data = 0; tensor.ndim = (ncols > 1) ? 2 : 1; tensor.dtype = get_DLDataType<T>(); if (tensor.dtype.code > kDLFloat) return nullptr; tensor.shape = new int64_t[tensor.ndim]; tensor.shape[0] = nrows; if (tensor.ndim > 1) tensor.shape[1] = ncols; tensor.strides = nullptr; tensor.byte_offset = 0; tensor.ctx.device_id = 0; tensor.ctx.device_type = device_type; T *data = nullptr; const size_t N = nrows * ncols; size_t bytesize = tensor_size(mng_tensor->dl_tensor); T *init = new T[N]; for (gdf_size_type c = 0; c < ncols; ++c) for (gdf_size_type i = 0; i < nrows; ++i) init[c*nrows + i] = i; if (kDLGPU == device_type) { EXPECT_EQ(RMM_ALLOC(&data, bytesize, 0), RMM_SUCCESS); cudaMemcpy(data, init, bytesize, cudaMemcpyDefault); } else { data = static_cast<T*>(malloc(bytesize)); memcpy(data, init, bytesize); } delete [] init; EXPECT_NE(data, nullptr); if (data == nullptr) return nullptr; tensor.data = data; mng_tensor->manager_ctx = nullptr; mng_tensor->deleter = deleter; return mng_tensor; } } TEST_F(DLPackTest, InvalidDeviceType) { using T = int32_t; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length, kDLCPU); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int 
num_columns = 0; // We support kDLGPU, kDLCPU, and kDLCPUPinned for (int i = kDLOpenCL; i <= kDLExtDev; i++) { mng_tensor->dl_tensor.ctx.device_type = static_cast<DLDeviceType>(i); ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_INVALID_API_CALL); } EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, InvalidDevice) { using T = int32_t; constexpr int64_t length = 100; int device_id = 0; ASSERT_EQ(cudaGetDevice(&device_id), cudaSuccess); DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); // spoof the wrong device ID mng_tensor->dl_tensor.ctx.device_id = device_id + 1; gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_INVALID_API_CALL); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, UnsupportedDimensions) { using T = int32_t; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(2, length); gdf_column *columns = nullptr; int num_columns = 0; // too many dimensions mng_tensor->dl_tensor.ndim = 3; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_NOTIMPLEMENTED_ERROR); mng_tensor->dl_tensor.ndim = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_DATASET_EMPTY); mng_tensor->dl_tensor.ndim = 1; mng_tensor->dl_tensor.shape[0] = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_DATASET_EMPTY); mng_tensor->dl_tensor.ndim = 1; mng_tensor->dl_tensor.shape[0] = std::numeric_limits<gdf_size_type>::max(); ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_COLUMN_SIZE_TOO_BIG); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, UnsupportedDataType) { using T = uint32_t; // unsigned types not supported yet constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); gdf_column *columns = nullptr; int num_columns = 
0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_UNSUPPORTED_DTYPE); EXPECT_EQ(nullptr, columns); EXPECT_EQ(num_columns, 0); deleter(mng_tensor); } TEST_F(DLPackTest, ToDLPack_EmptyDataset) { ASSERT_EQ(gdf_to_dlpack(nullptr, nullptr, 1), GDF_DATASET_EMPTY); DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, nullptr, 1), GDF_DATASET_EMPTY); gdf_column **columns = new gdf_column*[2]; ASSERT_EQ(gdf_to_dlpack(nullptr, columns, 0), GDF_DATASET_EMPTY); ASSERT_EQ(gdf_to_dlpack(tensor, columns, 0), GDF_DATASET_EMPTY); columns[0] = new gdf_column; columns[0]->dtype = GDF_FLOAT32; columns[0]->size = 0; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_DATASET_EMPTY); delete tensor; delete columns[0]; delete [] columns; } TEST_F(DLPackTest, ToDLPack_ColumnMismatch) { gdf_column **columns = new gdf_column*[2]; columns[0] = new gdf_column; columns[1] = new gdf_column; columns[0]->size = columns[1]->size = 1; columns[0]->dtype = GDF_INT32; columns[1]->dtype = GDF_FLOAT32; DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 2), GDF_DTYPE_MISMATCH); columns[1]->dtype = GDF_INT32; columns[1]->size = 2; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 2), GDF_COLUMN_SIZE_MISMATCH); delete tensor; delete columns[0]; delete columns[1]; delete [] columns; } TEST_F(DLPackTest, ToDLPack_NonNumerical) { gdf_column **columns = new gdf_column*[1]; columns[0] = new gdf_column; columns[0]->size = 1; DLManagedTensor *tensor = new DLManagedTensor; // all non-numeric gdf_dtype enums results in GDF_UNSUPPORTED_TYPE columns[0]->dtype = GDF_invalid; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_DATE32; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_DATE64; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); columns[0]->dtype = GDF_TIMESTAMP; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); 
columns[0]->dtype = GDF_CATEGORY; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_UNSUPPORTED_DTYPE); delete tensor; delete columns[0]; delete [] columns; } TYPED_TEST(DLPackTypedTest, FromDLPack_SingleColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; DLManagedTensor *mng_tensor = create_DLTensor<T>(1, length); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_SUCCESS); ASSERT_NE(columns, nullptr); // We currently only support 1D Tensors ASSERT_EQ(num_columns, 1); ASSERT_EQ(columns[0].size, length); T *output = new T[length]; cudaMemcpy(output, columns[0].data, length * sizeof(T), cudaMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; gdf_column_free(&columns[0]); delete [] columns; } TYPED_TEST(DLPackTypedTest, FromDLPack_MultiColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; constexpr int64_t width = 3; DLManagedTensor *mng_tensor = create_DLTensor<T>(width, length); ASSERT_NE(mng_tensor, nullptr); gdf_column *columns = nullptr; int num_columns = 0; ASSERT_EQ(gdf_from_dlpack(&columns, &num_columns, mng_tensor), GDF_SUCCESS); ASSERT_NE(columns, nullptr); ASSERT_EQ(num_columns, width); for (int64_t c = 0; c < num_columns; ++c) { ASSERT_EQ(columns[c].size, length); T *output = new T[length]; cudaMemcpy(output, columns[c].data, length * sizeof(T), cudaMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; gdf_column_free(&columns[c]); } delete [] columns; } TYPED_TEST(DLPackTypedTest, ToDLPack_SingleColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; cudf::test::column_wrapper<T> col0(length, [](gdf_index_type i) { return i; }, [](gdf_index_type i) { return true; }); gdf_column **columns = new gdf_column*[1]; columns[0] = col0.get(); DLManagedTensor 
*tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, 1), GDF_SUCCESS); ASSERT_EQ(tensor->dl_tensor.ndim, 1); ASSERT_EQ(tensor->dl_tensor.shape[0], length); T *output = new T[length]; cudaMemcpy(output, tensor->dl_tensor.data, length * sizeof(T), cudaMemcpyDefault); for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i), output[i]); delete [] output; tensor->deleter(tensor); delete [] columns; } TYPED_TEST(DLPackTypedTest, ToDLPack_MultiColumn) { using T = typename TestFixture::TestParam; constexpr int64_t length = 100; constexpr int64_t width = 3; cudf::test::column_wrapper<T>* cols[width]; gdf_column *columns[width]; for (int64_t c = 0; c < width; c++) { cols[c] = new cudf::test::column_wrapper<T>(length, [c](gdf_index_type i) { return i*(c+1); }, [](gdf_index_type i) { return true; }); columns[c] = cols[c]->get(); } DLManagedTensor *tensor = new DLManagedTensor; ASSERT_EQ(gdf_to_dlpack(tensor, columns, width), GDF_SUCCESS); ASSERT_EQ(tensor->dl_tensor.ndim, 2); ASSERT_EQ(tensor->dl_tensor.shape[0], length); ASSERT_EQ(tensor->dl_tensor.shape[1], width); T *output = new T[tensor_size(tensor->dl_tensor)/sizeof(T)]; cudaMemcpy(output, tensor->dl_tensor.data, width * length * sizeof(T), cudaMemcpyDefault); for (int64_t c = 0; c < width; c++) { T *o = &output[c * length]; for (int64_t i = 0; i < length; i++) EXPECT_EQ(static_cast<T>(i*(c+1)), o[i]); } delete [] output; tensor->deleter(tensor); for (int64_t c = 0; c < width; c++) delete cols[c]; }
293df292a187805901b927050828de9671dc0404.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ extern "C" __global__ void __launch_bounds__(128) sgemm_nn_vec_128x64 ( unsigned* param_Rand, const float* param_A, const float* param_B, float* param_C, int param_lda, int param_ldb8, int param_ldc, int param_m, int param_n, int param_k, float param_alpha, float param_beta, int param_flags, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 64*8*2 + 4]; int tid = threadIdx.x; share[tid] = 1; param_C[tid] = share[127-tid]; }
293df292a187805901b927050828de9671dc0404.cu
/* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ extern "C" __global__ void __launch_bounds__(128) sgemm_nn_vec_128x64 ( unsigned* param_Rand, const float* param_A, const float* param_B, float* param_C, int param_lda, int param_ldb8, int param_ldc, int param_m, int param_n, int param_k, float param_alpha, float param_beta, int param_flags, int param_ldaz, int param_ldbz, int param_ldcz, int param_batch_loops ) { __shared__ float share[128*8*2 + 64*8*2 + 4]; int tid = threadIdx.x; share[tid] = 1; param_C[tid] = share[127-tid]; }
7e0a8c1611486d56c0bfab18a87cee9092892c81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "crop_layer.h" #include "utils.h" #include "hip/hip_runtime.h" #include "image.h" } #if GPU __device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) { if(x < 0 || x >= w || y < 0 || y >= h) return 0; return image[x + w*(y + c*h)]; } __device__ float3 rgb_to_hsv_kernel(float3 rgb) { float r = rgb.x; float g = rgb.y; float b = rgb.z; float h, s, v; float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b); float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b); float delta = max - min; v = max; if(max == 0){ s = 0; h = -1; }else{ s = delta/max; if(r == max){ h = (g - b) / delta; } else if (g == max) { h = 2 + (b - r) / delta; } else { h = 4 + (r - g) / delta; } if (h < 0) h += 6; } return make_float3(h, s, v); } __device__ float3 hsv_to_rgb_kernel(float3 hsv) { float h = hsv.x; float s = hsv.y; float v = hsv.z; float r, g, b; float f, p, q, t; if (s == 0) { r = g = b = v; } else { int index = (int) floorf(h); f = h - index; p = v*(1-s); q = v*(1-s*f); t = v*(1-s*(1-f)); if(index == 0){ r = v; g = t; b = p; } else if(index == 1){ r = q; g = v; b = p; } else if(index == 2){ r = p; g = v; b = t; } else if(index == 3){ r = p; g = q; b = v; } else if(index == 4){ r = t; g = p; b = v; } else { r = v; g = p; b = q; } } r = (r < 0) ? 0 : ((r > 1) ? 1 : r); g = (g < 0) ? 0 : ((g > 1) ? 1 : g); b = (b < 0) ? 0 : ((b > 1) ? 
1 : b); return make_float3(r, g, b); } __device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) { int ix = (int) floorf(x); int iy = (int) floorf(y); float dx = x - ix; float dy = y - iy; float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) + dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) + (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) + dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c); return val; } __global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) { int size = batch * w * h; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; int x = id % w; id /= w; int y = id % h; id /= h; float rshift = rand[0]; float gshift = rand[1]; float bshift = rand[2]; float r0 = rand[8*id + 0]; float r1 = rand[8*id + 1]; float r2 = rand[8*id + 2]; float r3 = rand[8*id + 3]; saturation = r0*(saturation - 1) + 1; saturation = (r1 > .5f) ? 1.f/saturation : saturation; exposure = r2*(exposure - 1) + 1; exposure = (r3 > .5f) ? 
1.f/exposure : exposure; size_t offset = id * h * w * 3; image += offset; float r = image[x + w*(y + h*0)]; float g = image[x + w*(y + h*1)]; float b = image[x + w*(y + h*2)]; float3 rgb = make_float3(r,g,b); if(train){ float3 hsv = rgb_to_hsv_kernel(rgb); hsv.y *= saturation; hsv.z *= exposure; rgb = hsv_to_rgb_kernel(hsv); } else { shift = 0; } image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift; image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift; image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift; } __global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; float cx = w/2.f; float cy = h/2.f; int count = id; int j = id % crop_width; id /= crop_width; int i = id % crop_height; id /= crop_height; int k = id % c; id /= c; int b = id; float r4 = rand[8*b + 4]; float r5 = rand[8*b + 5]; float r6 = rand[8*b + 6]; float r7 = rand[8*b + 7]; float dw = (w - crop_width)*r4; float dh = (h - crop_height)*r5; flip = (flip && (r6 > .5f)); angle = 2*angle*r7 - angle; if(!train){ dw = (w - crop_width)/2.f; dh = (h - crop_height)/2.f; flip = 0; angle = 0; } input += w*h*c*b; float x = (flip) ? 
w - dw - j - 1 : j + dw; float y = i + dh; float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx; float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy; output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k); } extern "C" void forward_crop_layer_gpu(crop_layer layer, network net) { cuda_random(layer.rand_gpu, layer.batch*8); float radians = layer.angle*3.14159265f/180.f; float scale = 2; float translate = -1; if(layer.noadjust){ scale = 1; translate = 0; } int size = layer.batch * layer.w * layer.h; hipLaunchKernelGGL(( levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift); check_error(hipPeekAtLastError()); size = layer.batch*layer.c*layer.out_w*layer.out_h; hipLaunchKernelGGL(( forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu); check_error(hipPeekAtLastError()); /* cuda_pull_array(layer.output_gpu, layer.output, size); image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch)); image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch)); image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch)); translate_image(im, -translate); scale_image(im, 1/scale); translate_image(im2, -translate); scale_image(im2, 1/scale); translate_image(im3, -translate); scale_image(im3, 1/scale); show_image(im, "cropped"); show_image(im2, "cropped2"); show_image(im3, "cropped3"); cvWaitKey(0); */ } #endif
7e0a8c1611486d56c0bfab18a87cee9092892c81.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "crop_layer.h" #include "utils.h" #include "cuda.h" #include "image.h" } #if GPU __device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) { if(x < 0 || x >= w || y < 0 || y >= h) return 0; return image[x + w*(y + c*h)]; } __device__ float3 rgb_to_hsv_kernel(float3 rgb) { float r = rgb.x; float g = rgb.y; float b = rgb.z; float h, s, v; float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b); float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b); float delta = max - min; v = max; if(max == 0){ s = 0; h = -1; }else{ s = delta/max; if(r == max){ h = (g - b) / delta; } else if (g == max) { h = 2 + (b - r) / delta; } else { h = 4 + (r - g) / delta; } if (h < 0) h += 6; } return make_float3(h, s, v); } __device__ float3 hsv_to_rgb_kernel(float3 hsv) { float h = hsv.x; float s = hsv.y; float v = hsv.z; float r, g, b; float f, p, q, t; if (s == 0) { r = g = b = v; } else { int index = (int) floorf(h); f = h - index; p = v*(1-s); q = v*(1-s*f); t = v*(1-s*(1-f)); if(index == 0){ r = v; g = t; b = p; } else if(index == 1){ r = q; g = v; b = p; } else if(index == 2){ r = p; g = v; b = t; } else if(index == 3){ r = p; g = q; b = v; } else if(index == 4){ r = t; g = p; b = v; } else { r = v; g = p; b = q; } } r = (r < 0) ? 0 : ((r > 1) ? 1 : r); g = (g < 0) ? 0 : ((g > 1) ? 1 : g); b = (b < 0) ? 0 : ((b > 1) ? 
1 : b); return make_float3(r, g, b); } __device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) { int ix = (int) floorf(x); int iy = (int) floorf(y); float dx = x - ix; float dy = y - iy; float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) + dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) + (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) + dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c); return val; } __global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) { int size = batch * w * h; int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; int x = id % w; id /= w; int y = id % h; id /= h; float rshift = rand[0]; float gshift = rand[1]; float bshift = rand[2]; float r0 = rand[8*id + 0]; float r1 = rand[8*id + 1]; float r2 = rand[8*id + 2]; float r3 = rand[8*id + 3]; saturation = r0*(saturation - 1) + 1; saturation = (r1 > .5f) ? 1.f/saturation : saturation; exposure = r2*(exposure - 1) + 1; exposure = (r3 > .5f) ? 
1.f/exposure : exposure; size_t offset = id * h * w * 3; image += offset; float r = image[x + w*(y + h*0)]; float g = image[x + w*(y + h*1)]; float b = image[x + w*(y + h*2)]; float3 rgb = make_float3(r,g,b); if(train){ float3 hsv = rgb_to_hsv_kernel(rgb); hsv.y *= saturation; hsv.z *= exposure; rgb = hsv_to_rgb_kernel(hsv); } else { shift = 0; } image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift; image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift; image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift; } __global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= size) return; float cx = w/2.f; float cy = h/2.f; int count = id; int j = id % crop_width; id /= crop_width; int i = id % crop_height; id /= crop_height; int k = id % c; id /= c; int b = id; float r4 = rand[8*b + 4]; float r5 = rand[8*b + 5]; float r6 = rand[8*b + 6]; float r7 = rand[8*b + 7]; float dw = (w - crop_width)*r4; float dh = (h - crop_height)*r5; flip = (flip && (r6 > .5f)); angle = 2*angle*r7 - angle; if(!train){ dw = (w - crop_width)/2.f; dh = (h - crop_height)/2.f; flip = 0; angle = 0; } input += w*h*c*b; float x = (flip) ? 
w - dw - j - 1 : j + dw; float y = i + dh; float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx; float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy; output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k); } extern "C" void forward_crop_layer_gpu(crop_layer layer, network net) { cuda_random(layer.rand_gpu, layer.batch*8); float radians = layer.angle*3.14159265f/180.f; float scale = 2; float translate = -1; if(layer.noadjust){ scale = 1; translate = 0; } int size = layer.batch * layer.w * layer.h; levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift); check_error(cudaPeekAtLastError()); size = layer.batch*layer.c*layer.out_w*layer.out_h; forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu); check_error(cudaPeekAtLastError()); /* cuda_pull_array(layer.output_gpu, layer.output, size); image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch)); image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch)); image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch)); translate_image(im, -translate); scale_image(im, 1/scale); translate_image(im2, -translate); scale_image(im2, 1/scale); translate_image(im3, -translate); scale_image(im3, 1/scale); show_image(im, "cropped"); show_image(im2, "cropped2"); show_image(im3, "cropped3"); cvWaitKey(0); */ } #endif
b96ba43294148aa0a7962e8ff14ed7d1848dd96f.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <ATen/native/hip/ScanKernels.h> #include <ATen/native/hip/ScanUtils.cuh> #include <limits> #include <functional> namespace at { namespace native { void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummax_cuda", [&]() { scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>()); }); } void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummin_cuda", [&]() { scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>()); }); } }} // namespace at::native
b96ba43294148aa0a7962e8ff14ed7d1848dd96f.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <ATen/native/cuda/ScanKernels.h> #include <ATen/native/cuda/ScanUtils.cuh> #include <limits> #include <functional> namespace at { namespace native { void launch_cummax_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummax_cuda", [&]() { scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>()); }); } void launch_cummin_cuda_kernel(const TensorBase& self, const TensorBase& values, const TensorBase& indices, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummin_cuda", [&]() { scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>()); }); } }} // namespace at::native
f3911d29a5f34e9b430a481709d1dc297e51265c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <algorithm> #include <string> #include <fstream> #include <cmath> #include <cstdio> #include <cstdlib> #include "mpi.h" #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define CSC(call) \ do { \ hipError_t res = call; \ if (res != hipSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, hipGetErrorString(res)); \ exit(0); \ } \ } while(0) #define _i(i, j, k) ((((i) + 1) * (ny + 2) + ((j) + 1)) * (nz + 2) + ((k) + 1)) #define _ib(i, j, k) (((i) * nby + (j)) * nbz + (k)) #define _ibx(id) ((((id) / nbz) / nby)) #define _iby(id) ((((id) / nbz) % nby)) #define _ibz(id) (((id) % nbz)) __global__ void kernel_copy_to_buff_yz(double* data, double* buff, int nx, int ny, int nz, int x_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int j, k; for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) buff[j * nz + k] = data[_i(x_c, j, k)]; } __global__ void kernel_copy_to_buff_xz(double* data, double* buff, int nx, int ny, int nz, int y_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, k; for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) buff[i * nz + k] = data[_i(i, y_c, k)]; } __global__ void kernel_copy_to_buff_xy(double* data, double* buff, int nx, int ny, int nz, int z_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, j; for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) buff[i * ny + j] = data[_i(i, j, z_c)]; } __global__ void kernel_copy_from_buff_yz(double* data, 
double* buff, int nx, int ny, int nz, int x_c, double bc) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int j, k; if (buff) { for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) data[_i(x_c, j, k)] = buff[j * nz + k]; } else { for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) data[_i(x_c, j, k)] = bc; } } __global__ void kernel_copy_from_buff_xz(double* data, double* buff, int nx, int ny, int nz, int y_c, double bc) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, k; if (buff) { for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) data[_i(i, y_c, k)] = buff[i * nz + k]; } else { for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) data[_i(i, y_c, k)] = bc; } } __global__ void kernel_copy_from_buff_xy(double* data, double* buff, int nx, int ny, int nz, int z_c, double bc) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, j; if (buff) { for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) data[_i(i, j, z_c)] = buff[i * ny + j]; } else { for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) data[_i(i, j, z_c)] = bc; } } __global__ void kernel(double* data, double* next, double* errors, int nx, int ny, int nz, double hx, double hy, double hz) { int idz = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int idx = blockDim.z * blockIdx.z + threadIdx.z; int offsetz = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.z * gridDim.z; for (int i = idx; i < nx; i += offsetx) for (int j = idy; j < ny; 
j += offsety) for (int k = idz; k < nz; k += offsetz) { next[_i(i, j, k)] = 0.5 * ((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + (data[_i(i, j + 1, k)] + data[_i(i, j - 1, k)]) / (hy * hy) + (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / (1.0 / (hx * hx) + 1.0 / (hy * hy) + 1.0 / (hz * hz)); errors[i * ny * nz + j * nz + k] = ::fabs(next[_i(i, j, k)] - data[_i(i, j, k)]); } } int main(int argc, char* argv[]) { int ib, jb, kb; int i, j, k; int id_proc, nbx, nby, nbz, nx, ny, nz; double max_err, err; double hx, hy, hz; double eps, lx, ly, lz, bc_down, bc_up, bc_left, bc_right, bc_front, bc_back, u_0; double *data, *dev_data, *next, *dev_next, *buff, *dev_buff, *dev_errors, *temp, *recvbuf_err; char *file_buff; char file_name[100]; int device_count; hipGetDeviceCount(&device_count); MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &id_proc); MPI_Barrier(MPI_COMM_WORLD); hipSetDevice(id_proc % device_count); if (id_proc == 0) { std::cin >> nbx >> nby >> nbz; std::cin >> nx >> ny >> nz; std::cin >> file_name; std::cin >> eps; std::cin >> lx >> ly >> lz; std::cin >> bc_down >> bc_up >> bc_left >> bc_right >> bc_front >> bc_back; std::cin >> u_0; } MPI_Bcast(&nbx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nby, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nbz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&ny, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&ly, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lz, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_down, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_up, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_left, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_right, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_front, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_back, 1, 
MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&u_0, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&file_name, 100, MPI_CHAR, 0, MPI_COMM_WORLD); ib = _ibx(id_proc); jb = _iby(id_proc); kb = _ibz(id_proc); hx = lx / (nx * nbx); hy = ly / (ny * nby); hz = lz / (nz * nbz); int n_max = ::max(nx, ::max(ny, nz)); data = (double*)malloc((nx + 2) * (ny + 2) * (nz + 2) * sizeof(double)); next = (double*)malloc((nx + 2) * (ny + 2) * (nz + 2) * sizeof(double)); buff = (double*)malloc(n_max * n_max * sizeof(double)); recvbuf_err = (double*)malloc(nbx * nby * nbz * sizeof(double)); CSC(hipMalloc(&dev_data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double))); CSC(hipMalloc(&dev_next, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double))); CSC(hipMalloc(&dev_buff, n_max * n_max * sizeof(double))); CSC(hipMalloc(&dev_errors, nx * ny * nz * sizeof(double))); int buffer_size = 6 * (n_max * n_max * sizeof(double) + MPI_BSEND_OVERHEAD); double* buffer = (double*)malloc(buffer_size); MPI_Buffer_attach(buffer, buffer_size); for (i = 0; i < nx; ++i) for (j = 0; j < ny; ++j) for (k = 0; k < nz; ++k) data[_i(i, j, k)] = u_0; CSC(hipMemcpy(dev_data, data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double), hipMemcpyHostToDevice)); dim3 blocks(8, 8); dim3 threads(32, 4); for (;;) { MPI_Barrier(MPI_COMM_WORLD); if (ib + 1 < nbx) { kernel_copy_to_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nx - 1); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, ny * nz * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), id_proc, MPI_COMM_WORLD); } if (ib > 0) { kernel_copy_to_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, ny * nz * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), id_proc, MPI_COMM_WORLD); } if (jb + 1 < nby) { kernel_copy_to_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, ny - 1); 
CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, nx * nz * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb + 1, kb), id_proc, MPI_COMM_WORLD); } if (jb > 0) { kernel_copy_to_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, nx * nz * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), id_proc, MPI_COMM_WORLD); } if (kb + 1 < nbz) { kernel_copy_to_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nz - 1); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, nx * ny * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), id_proc, MPI_COMM_WORLD); } if (kb > 0) { kernel_copy_to_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(hipGetLastError()); CSC(hipMemcpy(buff, dev_buff, nx * ny * sizeof(double), hipMemcpyDeviceToHost)); MPI_Bsend(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), id_proc, MPI_COMM_WORLD); } if (ib + 1 < nbx) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), _ib(ib + 1, jb, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, ny * nz * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nx, 0.0); } else { kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, nx, bc_right); } CSC(hipGetLastError()); if (ib > 0) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), _ib(ib - 1, jb, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, ny * nz * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_left); } CSC(hipGetLastError()); if (jb + 1 < nby) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb + 1, kb), 
_ib(ib, jb + 1, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, nx * nz * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, ny, 0.0); } else { kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, ny, bc_back); } CSC(hipGetLastError()); if (jb > 0) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), _ib(ib, jb - 1, kb), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, nx * nz * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_front); } CSC(hipGetLastError()); if (kb + 1 < nbz) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), _ib(ib, jb, kb + 1), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, nx * ny * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nz, 0.0); } else { kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, nz, bc_up); } CSC(hipGetLastError()); if (kb > 0) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), _ib(ib, jb, kb - 1), MPI_COMM_WORLD, &status); CSC(hipMemcpy(dev_buff, buff, nx * ny * sizeof(double), hipMemcpyHostToDevice)); kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_down); } CSC(hipGetLastError()); MPI_Barrier(MPI_COMM_WORLD); kernel << <dim3(4, 4, 4), dim3(32, 4, 4) >> > (dev_data, dev_next, dev_errors, nx, ny, nz, hx, hy, hz); CSC(hipGetLastError()); err = 0.0; thrust::device_ptr<double> errors_p = thrust::device_pointer_cast(dev_errors); thrust::device_ptr<double> err_p = thrust::max_element(errors_p, errors_p + nx * ny * nz); err = *err_p; temp = dev_next; dev_next = 
dev_data; dev_data = temp; MPI_Allgather(&err, 1, MPI_DOUBLE, recvbuf_err, 1, MPI_DOUBLE, MPI_COMM_WORLD); max_err = 0.0; for (i = 0; i < nbx * nby * nbz; ++i) { if (recvbuf_err[i] > max_err) max_err = recvbuf_err[i]; } if (max_err < eps) break; } CSC(hipMemcpy(data, dev_data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double), hipMemcpyDeviceToHost)); CSC(hipFree(dev_data)); CSC(hipFree(dev_next)); CSC(hipFree(dev_buff)); CSC(hipFree(dev_errors)); MPI_Barrier(MPI_COMM_WORLD); int n_size = 20; file_buff = (char*)malloc(nx * ny * nz * n_size * sizeof(char)); memset(file_buff, ' ', nx * ny * nz * n_size * sizeof(char)); for (k = 0; k < nz; ++k) for (j = 0; j < ny; ++j) { for (i = 0; i < nx; ++i) sprintf(file_buff + (k * ny * nx + j * nx + i) * n_size, "%e ", data[_i(i, j, k)]); if (ib + 1 == nbx) { file_buff[(k * ny * nx + j * nx + nx) * n_size - 1] = '\n'; if (jb + 1 == nby && j + 1 == ny) file_buff[(k * ny * nx + j * nx + nx) * n_size - 2] = '\n'; } } for (i = 0; i < nx * ny * nz * n_size; ++i) if (file_buff[i] == '\0') file_buff[i] = ' '; MPI_File fp; MPI_Datatype type1; MPI_Datatype type2; MPI_Type_create_hvector(ny, nx * n_size * sizeof(char), nx * n_size * nbx * sizeof(char), MPI_CHAR, &type1); MPI_Type_commit(&type1); MPI_Type_create_hvector(nz, 1, nby * nx * ny * n_size * nbx * sizeof(char), type1, &type2); MPI_Type_commit(&type2); MPI_File_open(MPI_COMM_WORLD, file_name, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp); MPI_File_set_view(fp, (kb * nbx * nby * nz + jb * nbx) * (nx * ny * n_size * sizeof(char)) + ib * nx * n_size * sizeof(char), MPI_CHAR, type2, "native", MPI_INFO_NULL); MPI_File_write_all(fp, file_buff, nx * ny * nz * n_size * sizeof(char), MPI_CHAR, MPI_STATUS_IGNORE); MPI_File_close(&fp); MPI_Type_free(&type1); MPI_Type_free(&type2); MPI_Buffer_detach(buffer, &buffer_size); MPI_Finalize(); free(buff); free(data); free(next); free(buffer); free(file_buff); free(recvbuf_err); return 0; }
f3911d29a5f34e9b430a481709d1dc297e51265c.cu
#include <iostream> #include <algorithm> #include <string> #include <fstream> #include <cmath> #include <cstdio> #include <cstdlib> #include "mpi.h" #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define CSC(call) \ do { \ cudaError_t res = call; \ if (res != cudaSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, cudaGetErrorString(res)); \ exit(0); \ } \ } while(0) #define _i(i, j, k) ((((i) + 1) * (ny + 2) + ((j) + 1)) * (nz + 2) + ((k) + 1)) #define _ib(i, j, k) (((i) * nby + (j)) * nbz + (k)) #define _ibx(id) ((((id) / nbz) / nby)) #define _iby(id) ((((id) / nbz) % nby)) #define _ibz(id) (((id) % nbz)) __global__ void kernel_copy_to_buff_yz(double* data, double* buff, int nx, int ny, int nz, int x_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int j, k; for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) buff[j * nz + k] = data[_i(x_c, j, k)]; } __global__ void kernel_copy_to_buff_xz(double* data, double* buff, int nx, int ny, int nz, int y_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, k; for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) buff[i * nz + k] = data[_i(i, y_c, k)]; } __global__ void kernel_copy_to_buff_xy(double* data, double* buff, int nx, int ny, int nz, int z_c) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, j; for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) buff[i * ny + j] = data[_i(i, j, z_c)]; } __global__ void kernel_copy_from_buff_yz(double* data, double* buff, int nx, int ny, int nz, int x_c, double bc) { int idy = blockDim.x * 
blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int j, k; if (buff) { for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) data[_i(x_c, j, k)] = buff[j * nz + k]; } else { for (j = idx; j < ny; j += offsetx) for (k = idy; k < nz; k += offsety) data[_i(x_c, j, k)] = bc; } } __global__ void kernel_copy_from_buff_xz(double* data, double* buff, int nx, int ny, int nz, int y_c, double bc) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, k; if (buff) { for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) data[_i(i, y_c, k)] = buff[i * nz + k]; } else { for (i = idx; i < nx; i += offsetx) for (k = idy; k < nz; k += offsety) data[_i(i, y_c, k)] = bc; } } __global__ void kernel_copy_from_buff_xy(double* data, double* buff, int nx, int ny, int nz, int z_c, double bc) { int idy = blockDim.x * blockIdx.x + threadIdx.x; int idx = blockDim.y * blockIdx.y + threadIdx.y; int offsety = blockDim.x * gridDim.x; int offsetx = blockDim.y * gridDim.y; int i, j; if (buff) { for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) data[_i(i, j, z_c)] = buff[i * ny + j]; } else { for (i = idx; i < nx; i += offsetx) for (j = idy; j < ny; j += offsety) data[_i(i, j, z_c)] = bc; } } __global__ void kernel(double* data, double* next, double* errors, int nx, int ny, int nz, double hx, double hy, double hz) { int idz = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int idx = blockDim.z * blockIdx.z + threadIdx.z; int offsetz = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; int offsetx = blockDim.z * gridDim.z; for (int i = idx; i < nx; i += offsetx) for (int j = idy; j < ny; j += offsety) for (int k = idz; k < nz; k += offsetz) { next[_i(i, j, k)] = 0.5 * 
((data[_i(i + 1, j, k)] + data[_i(i - 1, j, k)]) / (hx * hx) + (data[_i(i, j + 1, k)] + data[_i(i, j - 1, k)]) / (hy * hy) + (data[_i(i, j, k + 1)] + data[_i(i, j, k - 1)]) / (hz * hz)) / (1.0 / (hx * hx) + 1.0 / (hy * hy) + 1.0 / (hz * hz)); errors[i * ny * nz + j * nz + k] = std::fabs(next[_i(i, j, k)] - data[_i(i, j, k)]); } } int main(int argc, char* argv[]) { int ib, jb, kb; int i, j, k; int id_proc, nbx, nby, nbz, nx, ny, nz; double max_err, err; double hx, hy, hz; double eps, lx, ly, lz, bc_down, bc_up, bc_left, bc_right, bc_front, bc_back, u_0; double *data, *dev_data, *next, *dev_next, *buff, *dev_buff, *dev_errors, *temp, *recvbuf_err; char *file_buff; char file_name[100]; int device_count; cudaGetDeviceCount(&device_count); MPI_Status status; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &id_proc); MPI_Barrier(MPI_COMM_WORLD); cudaSetDevice(id_proc % device_count); if (id_proc == 0) { std::cin >> nbx >> nby >> nbz; std::cin >> nx >> ny >> nz; std::cin >> file_name; std::cin >> eps; std::cin >> lx >> ly >> lz; std::cin >> bc_down >> bc_up >> bc_left >> bc_right >> bc_front >> bc_back; std::cin >> u_0; } MPI_Bcast(&nbx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nby, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nbz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nx, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&ny, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&eps, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&ly, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&lz, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_down, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_up, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_left, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_right, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_front, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&bc_back, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(&u_0, 1, MPI_DOUBLE, 0, 
MPI_COMM_WORLD); MPI_Bcast(&file_name, 100, MPI_CHAR, 0, MPI_COMM_WORLD); ib = _ibx(id_proc); jb = _iby(id_proc); kb = _ibz(id_proc); hx = lx / (nx * nbx); hy = ly / (ny * nby); hz = lz / (nz * nbz); int n_max = std::max(nx, std::max(ny, nz)); data = (double*)malloc((nx + 2) * (ny + 2) * (nz + 2) * sizeof(double)); next = (double*)malloc((nx + 2) * (ny + 2) * (nz + 2) * sizeof(double)); buff = (double*)malloc(n_max * n_max * sizeof(double)); recvbuf_err = (double*)malloc(nbx * nby * nbz * sizeof(double)); CSC(cudaMalloc(&dev_data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double))); CSC(cudaMalloc(&dev_next, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double))); CSC(cudaMalloc(&dev_buff, n_max * n_max * sizeof(double))); CSC(cudaMalloc(&dev_errors, nx * ny * nz * sizeof(double))); int buffer_size = 6 * (n_max * n_max * sizeof(double) + MPI_BSEND_OVERHEAD); double* buffer = (double*)malloc(buffer_size); MPI_Buffer_attach(buffer, buffer_size); for (i = 0; i < nx; ++i) for (j = 0; j < ny; ++j) for (k = 0; k < nz; ++k) data[_i(i, j, k)] = u_0; CSC(cudaMemcpy(dev_data, data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double), cudaMemcpyHostToDevice)); dim3 blocks(8, 8); dim3 threads(32, 4); for (;;) { MPI_Barrier(MPI_COMM_WORLD); if (ib + 1 < nbx) { kernel_copy_to_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nx - 1); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, ny * nz * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), id_proc, MPI_COMM_WORLD); } if (ib > 0) { kernel_copy_to_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, ny * nz * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), id_proc, MPI_COMM_WORLD); } if (jb + 1 < nby) { kernel_copy_to_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, ny - 1); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, 
nx * nz * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb + 1, kb), id_proc, MPI_COMM_WORLD); } if (jb > 0) { kernel_copy_to_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, nx * nz * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), id_proc, MPI_COMM_WORLD); } if (kb + 1 < nbz) { kernel_copy_to_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nz - 1); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, nx * ny * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), id_proc, MPI_COMM_WORLD); } if (kb > 0) { kernel_copy_to_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, 0); CSC(cudaGetLastError()); CSC(cudaMemcpy(buff, dev_buff, nx * ny * sizeof(double), cudaMemcpyDeviceToHost)); MPI_Bsend(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), id_proc, MPI_COMM_WORLD); } if (ib + 1 < nbx) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib + 1, jb, kb), _ib(ib + 1, jb, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, ny * nz * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nx, 0.0); } else { kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, nx, bc_right); } CSC(cudaGetLastError()); if (ib > 0) { MPI_Recv(buff, ny * nz, MPI_DOUBLE, _ib(ib - 1, jb, kb), _ib(ib - 1, jb, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, ny * nz * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_yz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_left); } CSC(cudaGetLastError()); if (jb + 1 < nby) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb + 1, kb), _ib(ib, jb + 1, kb), MPI_COMM_WORLD, 
&status); CSC(cudaMemcpy(dev_buff, buff, nx * nz * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, ny, 0.0); } else { kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, ny, bc_back); } CSC(cudaGetLastError()); if (jb > 0) { MPI_Recv(buff, nx * nz, MPI_DOUBLE, _ib(ib, jb - 1, kb), _ib(ib, jb - 1, kb), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, nx * nz * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_xz << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_front); } CSC(cudaGetLastError()); if (kb + 1 < nbz) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb + 1), _ib(ib, jb, kb + 1), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, nx * ny * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, nz, 0.0); } else { kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, nz, bc_up); } CSC(cudaGetLastError()); if (kb > 0) { MPI_Recv(buff, nx * ny, MPI_DOUBLE, _ib(ib, jb, kb - 1), _ib(ib, jb, kb - 1), MPI_COMM_WORLD, &status); CSC(cudaMemcpy(dev_buff, buff, nx * ny * sizeof(double), cudaMemcpyHostToDevice)); kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, dev_buff, nx, ny, nz, -1, 0.0); } else { kernel_copy_from_buff_xy << <blocks, threads >> > (dev_data, NULL, nx, ny, nz, -1, bc_down); } CSC(cudaGetLastError()); MPI_Barrier(MPI_COMM_WORLD); kernel << <dim3(4, 4, 4), dim3(32, 4, 4) >> > (dev_data, dev_next, dev_errors, nx, ny, nz, hx, hy, hz); CSC(cudaGetLastError()); err = 0.0; thrust::device_ptr<double> errors_p = thrust::device_pointer_cast(dev_errors); thrust::device_ptr<double> err_p = thrust::max_element(errors_p, errors_p + nx * ny * nz); err = *err_p; temp = dev_next; dev_next = dev_data; dev_data = temp; 
MPI_Allgather(&err, 1, MPI_DOUBLE, recvbuf_err, 1, MPI_DOUBLE, MPI_COMM_WORLD); max_err = 0.0; for (i = 0; i < nbx * nby * nbz; ++i) { if (recvbuf_err[i] > max_err) max_err = recvbuf_err[i]; } if (max_err < eps) break; } CSC(cudaMemcpy(data, dev_data, (nx + 2) * (ny + 2) * (nz + 2) * sizeof(double), cudaMemcpyDeviceToHost)); CSC(cudaFree(dev_data)); CSC(cudaFree(dev_next)); CSC(cudaFree(dev_buff)); CSC(cudaFree(dev_errors)); MPI_Barrier(MPI_COMM_WORLD); int n_size = 20; file_buff = (char*)malloc(nx * ny * nz * n_size * sizeof(char)); memset(file_buff, ' ', nx * ny * nz * n_size * sizeof(char)); for (k = 0; k < nz; ++k) for (j = 0; j < ny; ++j) { for (i = 0; i < nx; ++i) sprintf(file_buff + (k * ny * nx + j * nx + i) * n_size, "%e ", data[_i(i, j, k)]); if (ib + 1 == nbx) { file_buff[(k * ny * nx + j * nx + nx) * n_size - 1] = '\n'; if (jb + 1 == nby && j + 1 == ny) file_buff[(k * ny * nx + j * nx + nx) * n_size - 2] = '\n'; } } for (i = 0; i < nx * ny * nz * n_size; ++i) if (file_buff[i] == '\0') file_buff[i] = ' '; MPI_File fp; MPI_Datatype type1; MPI_Datatype type2; MPI_Type_create_hvector(ny, nx * n_size * sizeof(char), nx * n_size * nbx * sizeof(char), MPI_CHAR, &type1); MPI_Type_commit(&type1); MPI_Type_create_hvector(nz, 1, nby * nx * ny * n_size * nbx * sizeof(char), type1, &type2); MPI_Type_commit(&type2); MPI_File_open(MPI_COMM_WORLD, file_name, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp); MPI_File_set_view(fp, (kb * nbx * nby * nz + jb * nbx) * (nx * ny * n_size * sizeof(char)) + ib * nx * n_size * sizeof(char), MPI_CHAR, type2, "native", MPI_INFO_NULL); MPI_File_write_all(fp, file_buff, nx * ny * nz * n_size * sizeof(char), MPI_CHAR, MPI_STATUS_IGNORE); MPI_File_close(&fp); MPI_Type_free(&type1); MPI_Type_free(&type2); MPI_Buffer_detach(buffer, &buffer_size); MPI_Finalize(); free(buff); free(data); free(next); free(buffer); free(file_buff); free(recvbuf_err); return 0; }
35028f8fad3fdaf8de38070a711a90fee3073d86.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "addOneColumnPerThread.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; hipMalloc(&a, XSIZE*YSIZE); double *b = NULL; hipMalloc(&b, XSIZE*YSIZE); double *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( addOneColumnPerThread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( addOneColumnPerThread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( addOneColumnPerThread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
35028f8fad3fdaf8de38070a711a90fee3073d86.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "addOneColumnPerThread.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); double *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); double *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); addOneColumnPerThread<<<gridBlock,threadBlock>>>(a,b,c,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { addOneColumnPerThread<<<gridBlock,threadBlock>>>(a,b,c,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { addOneColumnPerThread<<<gridBlock,threadBlock>>>(a,b,c,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f11b69517be7a3ce20d6583070c3090d615b25e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, const Dtype* bottom_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_data += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; top_data[mask_index] = bottom_data[index]; } else { top_data[unpooled_index] = bottom_data[index]; } } } template <typename Dtype> __global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw] / pool_size; } } top_data[index] = distval; } } template <typename Dtype> __global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); distval += bottom_data[ph * width + pw]; } } top_data[index] = distval; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0.), top_data); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, bottom_mask); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AveUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( 
TileUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_diff += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; bottom_diff[index] = top_diff[mask_index]; } else { bottom_diff[index] = top_diff[unpooled_index]; } } } template <typename Dtype> __global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart 
= pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> __global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); 
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_mask, top[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AveUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( TileUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer); } // namespace caffe
f11b69517be7a3ce20d6583070c3090d615b25e4.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, const Dtype* bottom_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_data += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; top_data[mask_index] = bottom_data[index]; } else { top_data[unpooled_index] = bottom_data[index]; } } } template <typename Dtype> __global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw] / pool_size; } } top_data[index] = distval; } } template <typename Dtype> __global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); distval += bottom_data[ph * width + pw]; } } top_data[index] = distval; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0.), top_data); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, bottom_mask); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AveUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) TileUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->count(), 
bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_diff += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; bottom_diff[index] = top_diff[mask_index]; } else { bottom_diff[index] = top_diff[unpooled_index]; } } } template <typename Dtype> __global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + 
kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> __global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, 
Dtype(0.), bottom_diff); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_mask, top[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AveUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) TileUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer); } // namespace caffe
f757e979f17c8af22432ff424cc90db790527f89.hip
// !!! This is a file automatically generated by hipify!!! // // vector_add.cu // // Creado por Guadalupe Flores 22/06/20. // // Suma de vectores con 100 millones de registros utilizando 1 thread, 1 block de CUDA. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define N 100000000 //100.000.000 #define MAX_ERR 1e-6 __global__ void vector_add(float* out, float* a, float* b, int n) { for (int i = 0; i < n; i++) { out[i] = a[i] + b[i]; } } int main() { float* a, * b, * out; float* d_a, * d_b, * d_out; // Asigno memoria host a = (float*)malloc(sizeof(float) * N); b = (float*)malloc(sizeof(float) * N); out = (float*)malloc(sizeof(float) * N); // Inicializo los array host for (int i = 0; i < N; i++) { a[i] = 1.0f; b[i] = 2.0f; } // Asigno memoria en la GPU hipMalloc((void**)&d_a, sizeof(float) * N); hipMalloc((void**)&d_b, sizeof(float) * N); hipMalloc((void**)&d_out, sizeof(float) * N); // Transfiero la memoria hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice); hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice); // Ejecuto el kernel hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, d_out, d_a, d_b, N); // Transfiero la memoria de vuelta para traer el resultado. hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost); // Verificamos for (int i = 0; i < N; i++) { assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR); } printf("out[0] = %f\n", out[0]); printf("PASSED CUDA\n"); // Liberamos memoria del GPU hipFree(d_a); hipFree(d_b); hipFree(d_out); // Liberamos la memoria del host. free(a); free(b); free(out); }
f757e979f17c8af22432ff424cc90db790527f89.cu
// // vector_add.cu // // Creado por Guadalupe Flores 22/06/20. // // Suma de vectores con 100 millones de registros utilizando 1 thread, 1 block de CUDA. // #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> #define N 100000000 //100.000.000 #define MAX_ERR 1e-6 __global__ void vector_add(float* out, float* a, float* b, int n) { for (int i = 0; i < n; i++) { out[i] = a[i] + b[i]; } } int main() { float* a, * b, * out; float* d_a, * d_b, * d_out; // Asigno memoria host a = (float*)malloc(sizeof(float) * N); b = (float*)malloc(sizeof(float) * N); out = (float*)malloc(sizeof(float) * N); // Inicializo los array host for (int i = 0; i < N; i++) { a[i] = 1.0f; b[i] = 2.0f; } // Asigno memoria en la GPU cudaMalloc((void**)&d_a, sizeof(float) * N); cudaMalloc((void**)&d_b, sizeof(float) * N); cudaMalloc((void**)&d_out, sizeof(float) * N); // Transfiero la memoria cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice); // Ejecuto el kernel vector_add<<<1,1>>>(d_out, d_a, d_b, N); // Transfiero la memoria de vuelta para traer el resultado. cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost); // Verificamos for (int i = 0; i < N; i++) { assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR); } printf("out[0] = %f\n", out[0]); printf("PASSED CUDA\n"); // Liberamos memoria del GPU cudaFree(d_a); cudaFree(d_b); cudaFree(d_out); // Liberamos la memoria del host. free(a); free(b); free(out); }
454df3b4acbe9dd0df05964f97a2cbad59149be6.hip
// !!! This is a file automatically generated by hipify!!! // // csrmv // // : cusparse (MV) // hipsparseDcsrmv // Ddouble, mvmaxtrix * vector // #include<cuda_runtime.h> #include<iostream> #include<cusparse_v2.h> #include<thrust/device_vector.h> const int N = 1024; int main(int argc, char** argv){ // CSR double elements[N*3]; int columnIndeces[N*3]; int rowOffsets[N+1]; int nonZeroCount = 0; // rowOffsets[0] = 0; // row0 // (CSR) for(int i = 0; i < N; i++){ elements[nonZeroCount] = 2; columnIndeces[nonZeroCount] = i; nonZeroCount++; if(i > 0){ elements[nonZeroCount] = 1; columnIndeces[nonZeroCount] = i - 1; nonZeroCount++; } if(i < N-1){ elements[nonZeroCount] = 1; columnIndeces[nonZeroCount] = i + 1; nonZeroCount++; } rowOffsets[i+1] = nonZeroCount; } // double vector[N]; for(int i = 0; i < N; i++){ vector[i] = i * 0.1; } // double result[N]; // thrust::device_vector<double> elementsDevice(N*3); thrust::device_vector<int> columnIndecesDevice(N*3); thrust::device_vector<int> rowOffsetsDevice(N+1); thrust::device_vector<double> vectorDevice(N); thrust::device_vector<double> resultDevice(N); // thrust::copy_n(elements, N*3, elementsDevice.begin()); thrust::copy_n(columnIndeces, N*3, columnIndecesDevice.begin()); thrust::copy_n(rowOffsets, N+1, rowOffsetsDevice.begin()); thrust::copy_n(vector, N, vectorDevice.begin()); hipsparseHandle_t handle; hipsparseCreate(&handle); hipsparseMatDescr_t matDescr; hipsparseCreateMatDescr(&matDescr); hipsparseSetMatType(matDescr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(matDescr, HIPSPARSE_INDEX_BASE_ZERO); double* elementsPtr = thrust::raw_pointer_cast(&(elementsDevice[0])); int* columnIndecesPtr = thrust::raw_pointer_cast(&(columnIndecesDevice[0])); int* rowOffsetsPtr = thrust::raw_pointer_cast(&(rowOffsetsDevice[0])); double* vectorPtr = thrust::raw_pointer_cast(&(vectorDevice[0])); double* resultPtr = thrust::raw_pointer_cast(&(resultDevice[0])); double alpha = 1.0; double beta = 0.0; 
hipsparseDcsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE,N,N,nonZeroCount, &alpha,matDescr,elementsPtr,rowOffsetsPtr,columnIndecesPtr, vectorPtr,&beta,resultPtr); thrust::copy_n(resultDevice.begin(), N, result); for(int i = 0; i < N; i++){ std::cout << result[i] << std::endl; } return 0; }
454df3b4acbe9dd0df05964f97a2cbad59149be6.cu
// // 【csrmv】 // // 概要: cusparse (MV)を利用するためのサンプルコード // 利用する関数は cusparseDcsrmv であり // ここでDはdouble, mvはmaxtrix * vector である。 // #include<cuda_runtime.h> #include<iostream> #include<cusparse_v2.h> #include<thrust/device_vector.h> const int N = 1024; int main(int argc, char** argv){ // CSR形式疎行列を用意する double elements[N*3]; int columnIndeces[N*3]; int rowOffsets[N+1]; int nonZeroCount = 0; // 非行列要素数 rowOffsets[0] = 0; // rowのオフセットの最初の要素は0 // 行列の初期化(CSR形式) for(int i = 0; i < N; i++){ elements[nonZeroCount] = 2; columnIndeces[nonZeroCount] = i; nonZeroCount++; if(i > 0){ elements[nonZeroCount] = 1; columnIndeces[nonZeroCount] = i - 1; nonZeroCount++; } if(i < N-1){ elements[nonZeroCount] = 1; columnIndeces[nonZeroCount] = i + 1; nonZeroCount++; } rowOffsets[i+1] = nonZeroCount; } // ベクトルを用意 double vector[N]; for(int i = 0; i < N; i++){ vector[i] = i * 0.1; } // 返却用のホストベクトルを用意 double result[N]; // デバイス側の配列を用意 thrust::device_vector<double> elementsDevice(N*3); thrust::device_vector<int> columnIndecesDevice(N*3); thrust::device_vector<int> rowOffsetsDevice(N+1); thrust::device_vector<double> vectorDevice(N); thrust::device_vector<double> resultDevice(N); // ホストベクトルをデバイスに転送 thrust::copy_n(elements, N*3, elementsDevice.begin()); thrust::copy_n(columnIndeces, N*3, columnIndecesDevice.begin()); thrust::copy_n(rowOffsets, N+1, rowOffsetsDevice.begin()); thrust::copy_n(vector, N, vectorDevice.begin()); cusparseHandle_t handle; cusparseCreate(&handle); cusparseMatDescr_t matDescr; cusparseCreateMatDescr(&matDescr); cusparseSetMatType(matDescr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(matDescr, CUSPARSE_INDEX_BASE_ZERO); double* elementsPtr = thrust::raw_pointer_cast(&(elementsDevice[0])); int* columnIndecesPtr = thrust::raw_pointer_cast(&(columnIndecesDevice[0])); int* rowOffsetsPtr = thrust::raw_pointer_cast(&(rowOffsetsDevice[0])); double* vectorPtr = thrust::raw_pointer_cast(&(vectorDevice[0])); double* resultPtr = thrust::raw_pointer_cast(&(resultDevice[0])); 
double alpha = 1.0; double beta = 0.0; cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE,N,N,nonZeroCount, &alpha,matDescr,elementsPtr,rowOffsetsPtr,columnIndecesPtr, vectorPtr,&beta,resultPtr); thrust::copy_n(resultDevice.begin(), N, result); for(int i = 0; i < N; i++){ std::cout << result[i] << std::endl; } return 0; }
2902e79c979d3e0273babfc8ccfab20cf26b7817.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/shard_index_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void ShardIndexInner(const T* in_data, T* out_data, const int64_t numel, const int index_num, const int nshards, const int shard_id, const int ignore_value) { int shard_size = (index_num + nshards - 1) / nshards; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel) { assert(in_data[idx] >= 0 && in_data[idx] < index_num); if (in_data[idx] / shard_size == shard_id) { out_data[idx] = in_data[idx] % shard_size; } else { out_data[idx] = ignore_value; } } } using LoDTensor = framework::LoDTensor; template <typename T> class ShardIndexCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int index_num = context.Attr<int>("index_num"); int nshards = context.Attr<int>("nshards"); int shard_id = context.Attr<int>("shard_id"); int ignore_value = context.Attr<int>("ignore_value"); PADDLE_ENFORCE_GT( index_num, 0, 
platform::errors::InvalidArgument( "The value 'index_num' for Op(shard_index) must be greater than 0, " "but the value given is %d.", index_num)); PADDLE_ENFORCE_GT(nshards, 0, platform::errors::InvalidArgument( "The value 'nshard' for Op(shard_index) must be " "greater than 0, but the value given is %d.", nshards)); PADDLE_ENFORCE_GE( shard_id, 0, platform::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be greater or " "equal to 0, but the value given is %d.", shard_id)); PADDLE_ENFORCE_LT( shard_id, nshards, platform::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be less than " "nshards (%d), but the value given is %d.", nshards, shard_id)); out->Resize(in->dims()); out->set_lod(in->lod()); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); hipLaunchKernelGGL(( ShardIndexInner), (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, in_data, out_data, numel, index_num, nshards, shard_id, ignore_value); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(shard_index, ops::ShardIndexCUDAKernel<int>, ops::ShardIndexCUDAKernel<int64_t>);
2902e79c979d3e0273babfc8ccfab20cf26b7817.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/shard_index_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void ShardIndexInner(const T* in_data, T* out_data, const int64_t numel, const int index_num, const int nshards, const int shard_id, const int ignore_value) { int shard_size = (index_num + nshards - 1) / nshards; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel) { assert(in_data[idx] >= 0 && in_data[idx] < index_num); if (in_data[idx] / shard_size == shard_id) { out_data[idx] = in_data[idx] % shard_size; } else { out_data[idx] = ignore_value; } } } using LoDTensor = framework::LoDTensor; template <typename T> class ShardIndexCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* in = context.Input<LoDTensor>("X"); auto* out = context.Output<LoDTensor>("Out"); int index_num = context.Attr<int>("index_num"); int nshards = context.Attr<int>("nshards"); int shard_id = context.Attr<int>("shard_id"); int ignore_value = context.Attr<int>("ignore_value"); PADDLE_ENFORCE_GT( index_num, 0, platform::errors::InvalidArgument( "The value 'index_num' for Op(shard_index) must be greater than 0, " "but 
the value given is %d.", index_num)); PADDLE_ENFORCE_GT(nshards, 0, platform::errors::InvalidArgument( "The value 'nshard' for Op(shard_index) must be " "greater than 0, but the value given is %d.", nshards)); PADDLE_ENFORCE_GE( shard_id, 0, platform::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be greater or " "equal to 0, but the value given is %d.", shard_id)); PADDLE_ENFORCE_LT( shard_id, nshards, platform::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be less than " "nshards (%d), but the value given is %d.", nshards, shard_id)); out->Resize(in->dims()); out->set_lod(in->lod()); auto* in_data = in->data<T>(); auto* out_data = out->mutable_data<T>(context.GetPlace()); int64_t numel = in->numel(); auto stream = context.template device_context<platform::CUDADeviceContext>().stream(); ShardIndexInner<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( in_data, out_data, numel, index_num, nshards, shard_id, ignore_value); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(shard_index, ops::ShardIndexCUDAKernel<int>, ops::ShardIndexCUDAKernel<int64_t>);
bade06faa1093f174276376f1e3fccbe8ed85949.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //#include <helper_functions.h> // for benchmark purpose #define BLOCK_DIM_X 16 #define BLOCK_DIM_Y 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_gpu_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) { sum += A[row * K + i] * B[i * K + col]; } C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM_X, BLOCK_DIM_Y); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); hipLaunchKernelGGL(( sgemm_gpu_kernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, A, B, C, N, M, K, alpha, beta); } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } void performance_estimation(void(*sgemm)(const float *, const float *, float *, int, int, int, float, float), const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int test_iterations = 100; // Create timer //StopWatchInterface *timer = 0; // initial start an operation as a warm 
start sgemm(A, B, C, N, M, K, alpha, beta); // Record the start event //sdkCreateTimer(&timer); //sdkStartTimer(&timer); //////// // Operation body //////// for (int i = 0; i < test_iterations; i++) { sgemm(A, B, C, N, M, K, alpha, beta); } // Waits for GPU operation finish and recored the time //sdkStopTimer(&timer); // Compute and print the performance //float operation_time = sdkGetAverageTimerValue(&timer); //float operation_time_1_epoch = operation_time / test_iterations; //printf("Operation Time= %.4f msec\n", operation_time_1_epoch); // cleanup //sdkDeleteTimer(&timer); } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space hipMalloc((void **)&d_A, N * K * sizeof(float)); hipMalloc((void **)&d_B, K * M * sizeof(float)); hipMalloc((void **)&d_C, N * M * sizeof(float)); // initialize randomized values for memory space random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); // copy initial value for gpu memory hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B, A, K * M * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_C, A, N * M * sizeof(float), hipMemcpyHostToDevice); // do operation //sgemm_gpu(d_A, d_B, d_C, N, M, K, alpha, beta); performance_estimation(sgemm_gpu, d_A, d_B, d_C, N, M, K, alpha, beta); // terminates allocated gpu memory space hipFree(d_A); hipFree(d_B); hipFree(d_C); // terminates allocated memory space free(A); free(B); free(C); return 0; }
bade06faa1093f174276376f1e3fccbe8ed85949.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //#include <helper_functions.h> // for benchmark purpose #define BLOCK_DIM_X 16 #define BLOCK_DIM_Y 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device //! @param B matrix B as provided to device //! @param C matrix C as provided to device //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_gpu_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0.f; for (int i = 0; i < K; ++i) { sum += A[row * K + i] * B[i * K + col]; } C[row * M + col] = alpha * sum + beta * C[row * M + col]; } void sgemm_gpu(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { dim3 dimBlock(BLOCK_DIM_X, BLOCK_DIM_Y); dim3 dimGrid(M / dimBlock.x, N / dimBlock.y); sgemm_gpu_kernel <<< dimGrid, dimBlock >>> (A, B, C, N, M, K, alpha, beta); } void random_init(float *data, int size) { for (int i = 0; i < size; ++i) { data[i] = (rand() & 0xFF) / (float)RAND_MAX; } } void performance_estimation(void(*sgemm)(const float *, const float *, float *, int, int, int, float, float), const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta) { int test_iterations = 100; // Create timer //StopWatchInterface *timer = 0; // initial start an operation as a warm start sgemm(A, B, C, N, M, K, alpha, beta); // Record the start event //sdkCreateTimer(&timer); 
//sdkStartTimer(&timer); //////// // Operation body //////// for (int i = 0; i < test_iterations; i++) { sgemm(A, B, C, N, M, K, alpha, beta); } // Waits for GPU operation finish and recored the time //sdkStopTimer(&timer); // Compute and print the performance //float operation_time = sdkGetAverageTimerValue(&timer); //float operation_time_1_epoch = operation_time / test_iterations; //printf("Operation Time= %.4f msec\n", operation_time_1_epoch); // cleanup //sdkDeleteTimer(&timer); } int main() { float *A, *B, *C; float *d_A, *d_B, *d_C; int N, M, K; float alpha = 2.f; float beta = 1.f; N = M = K = 2048; // allocation of linear memory space A = (float *)malloc(N * K * sizeof(float)); B = (float *)malloc(K * M * sizeof(float)); C = (float *)malloc(N * M * sizeof(float)); // allocation of gpu linear memory space cudaMalloc((void **)&d_A, N * K * sizeof(float)); cudaMalloc((void **)&d_B, K * M * sizeof(float)); cudaMalloc((void **)&d_C, N * M * sizeof(float)); // initialize randomized values for memory space random_init(A, N * K); random_init(B, K * M); random_init(C, N * M); // copy initial value for gpu memory cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, A, K * M * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_C, A, N * M * sizeof(float), cudaMemcpyHostToDevice); // do operation //sgemm_gpu(d_A, d_B, d_C, N, M, K, alpha, beta); performance_estimation(sgemm_gpu, d_A, d_B, d_C, N, M, K, alpha, beta); // terminates allocated gpu memory space cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // terminates allocated memory space free(A); free(B); free(C); return 0; }
470a9612ad8eff1cb86675926ccd4b380e151d95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "helper_cuda.h" #include "helper_string.h" #include "kernels.hpp" #include "ssol_cudakernel.cu" template <int node_dim> __global__ void iter_calc(const float* old, float* val,const float* eval, const int* enode, const int* color_reord, const int offset, const int color_size, const int nedge, const int nnode){ int tid = blockDim.x*blockIdx.x+threadIdx.x; int reordIdx = tid + offset; if(reordIdx<nedge && tid < color_size){ int edgeIdx=color_reord[reordIdx]; for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA val[enode[2*edgeIdx+1]*node_dim+dim]+= eval[edgeIdx]*old[enode[edgeIdx*2+0]*node_dim+dim]; #else val[nnode*dim + enode[2*edgeIdx+1]] += eval[edgeIdx]*old[nnode*dim + enode[edgeIdx*2+0]]; #endif } } } //Potential extra version, may help particularly for SoA: //SOA, NDIM=4 20% speedup //AOS, NDIM=4 10% speedup /* template <int node_dim> __global__ void iter_calc(const float* old, float* val,const float* eval, const int* enode, const int* color_reord, const int offset, const int color_size, const int nedge, const int nnode){ int tid = blockDim.x*blockIdx.x+threadIdx.x; int reordIdx = tid + offset; float inc[node_dim]; if(reordIdx<nedge && tid < color_size){ int edgeIdx=color_reord[reordIdx]; #pragma unroll for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA inc[dim] = val[enode[2*edgeIdx+1]*node_dim+dim]+ eval[edgeIdx]*old[enode[edgeIdx*2+0]*node_dim+dim]; #else inc[dim] = val[nnode*dim + enode[2*edgeIdx+1]] + eval[edgeIdx]*old[nnode*dim + enode[edgeIdx*2+0]]; #endif } #pragma unroll for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA val[enode[2*edgeIdx+1]*node_dim+dim] = inc[dim]; #else val[nnode*dim + enode[2*edgeIdx+1]] = inc[dim]; #endif } } } */ void iter_calc(const int nedge, const int nnode, const int node_dim, const Block_coloring& bc, const Coloring& c, const arg& arg_enode, const arg& arg_edge_val, arg& arg_node_val, const arg& arg_node_old, cacheMap& cm, 
Kernel& timer){ int* enode_d = (int*) arg_enode.data_d; int* color_reord_d = (int *) c.arg_color_reord.data_d; float * node_val_d = (float*) arg_node_val.data_d; float * node_old_d = (float*) arg_node_old.data_d; float * edge_val_d = (float*) arg_edge_val.data_d; //calc next step for(int col=0; col<c.colornum;col++){ int color_offset = col==0 ? 0 : c.color_offsets[col-1]; int color_size = c.color_offsets[col] - color_offset; timer.timerStart(); hipLaunchKernelGGL(( iter_calc<NODE_DIM>), dim3((color_size-1)/BLOCKSIZE+1),dim3(BLOCKSIZE), 0, 0, node_old_d, node_val_d, edge_val_d, enode_d, color_reord_d, color_offset, color_size, nedge, nnode); checkCudaErrors( hipDeviceSynchronize() ); timer.timerStop(); } }
470a9612ad8eff1cb86675926ccd4b380e151d95.cu
#include "helper_cuda.h" #include "helper_string.h" #include "kernels.hpp" #include "ssol_cudakernel.cu" template <int node_dim> __global__ void iter_calc(const float* old, float* val,const float* eval, const int* enode, const int* color_reord, const int offset, const int color_size, const int nedge, const int nnode){ int tid = blockDim.x*blockIdx.x+threadIdx.x; int reordIdx = tid + offset; if(reordIdx<nedge && tid < color_size){ int edgeIdx=color_reord[reordIdx]; for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA val[enode[2*edgeIdx+1]*node_dim+dim]+= eval[edgeIdx]*old[enode[edgeIdx*2+0]*node_dim+dim]; #else val[nnode*dim + enode[2*edgeIdx+1]] += eval[edgeIdx]*old[nnode*dim + enode[edgeIdx*2+0]]; #endif } } } //Potential extra version, may help particularly for SoA: //SOA, NDIM=4 20% speedup //AOS, NDIM=4 10% speedup /* template <int node_dim> __global__ void iter_calc(const float* old, float* val,const float* eval, const int* enode, const int* color_reord, const int offset, const int color_size, const int nedge, const int nnode){ int tid = blockDim.x*blockIdx.x+threadIdx.x; int reordIdx = tid + offset; float inc[node_dim]; if(reordIdx<nedge && tid < color_size){ int edgeIdx=color_reord[reordIdx]; #pragma unroll for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA inc[dim] = val[enode[2*edgeIdx+1]*node_dim+dim]+ eval[edgeIdx]*old[enode[edgeIdx*2+0]*node_dim+dim]; #else inc[dim] = val[nnode*dim + enode[2*edgeIdx+1]] + eval[edgeIdx]*old[nnode*dim + enode[edgeIdx*2+0]]; #endif } #pragma unroll for(int dim=0; dim<node_dim;dim++){ #ifndef USE_SOA val[enode[2*edgeIdx+1]*node_dim+dim] = inc[dim]; #else val[nnode*dim + enode[2*edgeIdx+1]] = inc[dim]; #endif } } } */ void iter_calc(const int nedge, const int nnode, const int node_dim, const Block_coloring& bc, const Coloring& c, const arg& arg_enode, const arg& arg_edge_val, arg& arg_node_val, const arg& arg_node_old, cacheMap& cm, Kernel& timer){ int* enode_d = (int*) arg_enode.data_d; int* color_reord_d = (int *) 
c.arg_color_reord.data_d; float * node_val_d = (float*) arg_node_val.data_d; float * node_old_d = (float*) arg_node_old.data_d; float * edge_val_d = (float*) arg_edge_val.data_d; //calc next step for(int col=0; col<c.colornum;col++){ int color_offset = col==0 ? 0 : c.color_offsets[col-1]; int color_size = c.color_offsets[col] - color_offset; timer.timerStart(); iter_calc<NODE_DIM><<<(color_size-1)/BLOCKSIZE+1,BLOCKSIZE>>>(node_old_d, node_val_d, edge_val_d, enode_d, color_reord_d, color_offset, color_size, nedge, nnode); checkCudaErrors( cudaDeviceSynchronize() ); timer.timerStop(); } }
dffc8713df2e6cd208906f636ec932554cb8f72d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#ifdef __cplusplus //extern "C" { //#endif #include <stdio.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <math.h> #include "nms_layer_kernel.h" #include "math_functions.h" #define NUMBER_THREADS_PER_BLOCK_1D 16 #define NUMBER_THREADS_PER_BLOCK 256 int updiv2(const int a, const int b){ return (a+b-1)/b; } __global__ void nms_register_kernel(const Dtype* const src_pointer, int* workspace, const int w, const int h, const Dtype threshold) { // get pixel location (x,y) const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if( x>0 && x<(w-1) && y>0 && y<(h-1) ){ const Dtype value = src_pointer[y*w + x]; if(value > threshold){ const Dtype top = src_pointer[(y-1)*w + x]; const Dtype bottom = src_pointer[(y+1)*w + x]; const Dtype left = src_pointer[y*w + (x-1)]; const Dtype right = src_pointer[y*w + (x+1)]; const Dtype top_left = src_pointer[(y-1)*w + x-1]; const Dtype top_right = src_pointer[(y-1)*w + x+1]; const Dtype bottom_left = src_pointer[(y+1)*w + x-1]; const Dtype bottom_right = src_pointer[(y+1)*w + x+1]; if(value > top && value > bottom && value > left && value > right && value > top_left && value > bottom_left && value > bottom_right && value > top_right ){ workspace[y*w + x] = 1; } else { workspace[y*w + x] = 0; } } else { workspace[y*w + x] = 0; } } else if( x==0 || x==(w-1) || y==0 || y==(h-1) ){ workspace[y*w + x] = 0; } } __global__ void writeResultKernel(const int length, const int* const input, const Dtype* const src_pointer, Dtype* output, const int height, const int width, const int max_peaks){ __shared__ int local[NUMBER_THREADS_PER_BLOCK+1]; // one more const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; if(globalIdx < length){ local[threadIdx.x] = input[globalIdx]; if(threadIdx.x == NUMBER_THREADS_PER_BLOCK - 1 && globalIdx != length - 1){ //last thread in the block but not 
globally last, load one more local[threadIdx.x+1] = input[globalIdx+1]; } __syncthreads(); // see difference, except the globally last one if(globalIdx != length - 1){ if(local[threadIdx.x] != local[threadIdx.x + 1]) { //means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat const int peak_index = input[globalIdx]; //0-index const int peak_loc = globalIdx; const int peak_loc_x = peak_loc % width; const int peak_loc_y = peak_loc / width; if(peak_index < max_peaks){ //limitation //output[input[globalIdx]] = globalIdx; // if (1) { // float x_acc = peak_loc_x; // float y_acc = peak_loc_y; // float score_acc = src_pointer[peak_loc_y*width + peak_loc_x]; float x_acc = 0.f; float y_acc = 0.f; float score_acc = 0.f; // int count = 0; for (int dy=-3;dy<4;dy++) { if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<height) { for (int dx=-3;dx<4;dx++) { if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) { const float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx]; const float x = peak_loc_x+dx; const float y = peak_loc_y+dy; if (score>0) { x_acc += x*score; y_acc += y*score; score_acc += score; // count += 1; } } } } } const int output_index = (peak_index + 1) * 3; output[output_index] = x_acc/score_acc; output[output_index + 1] = y_acc/score_acc; output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // printf("%d, %d: %d, %d: %f, %f, %f, %f\n", width, height, peak_loc_x, peak_loc_y, output[output_index], output[output_index + 1], output[output_index + 2], score_acc); // // if(output[output_index + 1] == NAN || output[output_index] == NAN) { // printf("NAN\n"); // } // } else { // const int output_index = (peak_index + 1) * 3; // output[output_index] = peak_loc_x; // output[output_index + 1] = peak_loc_y; // output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } } } } else { //number of peaks output[0] = input[globalIdx] < max_peaks ? 
input[globalIdx] : max_peaks; } } } void nms_ongpu(const Dtype * src_ptr, Dtype *dst_ptr, const int num, const int height, const int width, const int num_parts, const int max_peaks, const Dtype threshold, int * work_ptr, hipStream_t stream) { const int offset = height * width; const int offset_dst = (max_peaks+1)*3; const dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D); const dim3 numBlocks(updiv2(width, threadsPerBlock.x), updiv2(height, threadsPerBlock.y)); hipError_t err; for(int n = 0; n < num; n++){ // batch for(int c = 0; c < num_parts; c++){ int* w_pointer1 = work_ptr + n * num_parts * offset + c * offset; const Dtype* src = src_ptr + n * num_parts * offset + c * offset; Dtype* dst = dst_ptr + n * num_parts * offset_dst + c * offset_dst; // This returns w_pointer1, a binary array with 0s & 1s. 1s in the local maximum positions (size = size(src)) hipLaunchKernelGGL(( nms_register_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, src, w_pointer1, width, height, threshold); //[0,0,0,0,1,0,0,0,0,1,0,0,0,0] err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1); // This modifies w_pointer1, now it indicates the local maximum indexes. Format: 0,0,0,1,1,1,1,2,2,2,... First maximum: 2, second: 6, etc... thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2] // This returns dst, with the NMS applied over it hipLaunchKernelGGL(( writeResultKernel), dim3(updiv2(offset,NUMBER_THREADS_PER_BLOCK)), dim3(NUMBER_THREADS_PER_BLOCK), 0, stream, offset, w_pointer1, src, dst, height, width, max_peaks); err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } } } } //#ifdef __cplusplus //} //#endif
dffc8713df2e6cd208906f636ec932554cb8f72d.cu
//#ifdef __cplusplus //extern "C" { //#endif #include <stdio.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <math.h> #include "nms_layer_kernel.h" #include "math_functions.h" #define NUMBER_THREADS_PER_BLOCK_1D 16 #define NUMBER_THREADS_PER_BLOCK 256 int updiv2(const int a, const int b){ return (a+b-1)/b; } __global__ void nms_register_kernel(const Dtype* const src_pointer, int* workspace, const int w, const int h, const Dtype threshold) { // get pixel location (x,y) const int x = (blockIdx.x * blockDim.x) + threadIdx.x; const int y = (blockIdx.y * blockDim.y) + threadIdx.y; if( x>0 && x<(w-1) && y>0 && y<(h-1) ){ const Dtype value = src_pointer[y*w + x]; if(value > threshold){ const Dtype top = src_pointer[(y-1)*w + x]; const Dtype bottom = src_pointer[(y+1)*w + x]; const Dtype left = src_pointer[y*w + (x-1)]; const Dtype right = src_pointer[y*w + (x+1)]; const Dtype top_left = src_pointer[(y-1)*w + x-1]; const Dtype top_right = src_pointer[(y-1)*w + x+1]; const Dtype bottom_left = src_pointer[(y+1)*w + x-1]; const Dtype bottom_right = src_pointer[(y+1)*w + x+1]; if(value > top && value > bottom && value > left && value > right && value > top_left && value > bottom_left && value > bottom_right && value > top_right ){ workspace[y*w + x] = 1; } else { workspace[y*w + x] = 0; } } else { workspace[y*w + x] = 0; } } else if( x==0 || x==(w-1) || y==0 || y==(h-1) ){ workspace[y*w + x] = 0; } } __global__ void writeResultKernel(const int length, const int* const input, const Dtype* const src_pointer, Dtype* output, const int height, const int width, const int max_peaks){ __shared__ int local[NUMBER_THREADS_PER_BLOCK+1]; // one more const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; if(globalIdx < length){ local[threadIdx.x] = input[globalIdx]; if(threadIdx.x == NUMBER_THREADS_PER_BLOCK - 1 && globalIdx != length - 1){ //last thread in the block but not globally last, load one more local[threadIdx.x+1] = input[globalIdx+1]; } __syncthreads(); 
// see difference, except the globally last one if(globalIdx != length - 1){ if(local[threadIdx.x] != local[threadIdx.x + 1]) { //means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat const int peak_index = input[globalIdx]; //0-index const int peak_loc = globalIdx; const int peak_loc_x = peak_loc % width; const int peak_loc_y = peak_loc / width; if(peak_index < max_peaks){ //limitation //output[input[globalIdx]] = globalIdx; // if (1) { // float x_acc = peak_loc_x; // float y_acc = peak_loc_y; // float score_acc = src_pointer[peak_loc_y*width + peak_loc_x]; float x_acc = 0.f; float y_acc = 0.f; float score_acc = 0.f; // int count = 0; for (int dy=-3;dy<4;dy++) { if ((peak_loc_y+dy)>0 && (peak_loc_y+dy)<height) { for (int dx=-3;dx<4;dx++) { if ((peak_loc_x+dx)>0 && (peak_loc_x+dx)<width) { const float score = src_pointer[(peak_loc_y+dy)*width + peak_loc_x+dx]; const float x = peak_loc_x+dx; const float y = peak_loc_y+dy; if (score>0) { x_acc += x*score; y_acc += y*score; score_acc += score; // count += 1; } } } } } const int output_index = (peak_index + 1) * 3; output[output_index] = x_acc/score_acc; output[output_index + 1] = y_acc/score_acc; output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // printf("%d, %d: %d, %d: %f, %f, %f, %f\n", width, height, peak_loc_x, peak_loc_y, output[output_index], output[output_index + 1], output[output_index + 2], score_acc); // // if(output[output_index + 1] == NAN || output[output_index] == NAN) { // printf("NAN\n"); // } // } else { // const int output_index = (peak_index + 1) * 3; // output[output_index] = peak_loc_x; // output[output_index + 1] = peak_loc_y; // output[output_index + 2] = src_pointer[peak_loc_y*width + peak_loc_x]; // } } } } else { //number of peaks output[0] = input[globalIdx] < max_peaks ? 
input[globalIdx] : max_peaks; } } } void nms_ongpu(const Dtype * src_ptr, Dtype *dst_ptr, const int num, const int height, const int width, const int num_parts, const int max_peaks, const Dtype threshold, int * work_ptr, cudaStream_t stream) { const int offset = height * width; const int offset_dst = (max_peaks+1)*3; const dim3 threadsPerBlock(NUMBER_THREADS_PER_BLOCK_1D, NUMBER_THREADS_PER_BLOCK_1D); const dim3 numBlocks(updiv2(width, threadsPerBlock.x), updiv2(height, threadsPerBlock.y)); cudaError_t err; for(int n = 0; n < num; n++){ // batch for(int c = 0; c < num_parts; c++){ int* w_pointer1 = work_ptr + n * num_parts * offset + c * offset; const Dtype* src = src_ptr + n * num_parts * offset + c * offset; Dtype* dst = dst_ptr + n * num_parts * offset_dst + c * offset_dst; // This returns w_pointer1, a binary array with 0s & 1s. 1s in the local maximum positions (size = size(src)) nms_register_kernel<<<numBlocks, threadsPerBlock, 0, stream>>>(src, w_pointer1, width, height, threshold); //[0,0,0,0,1,0,0,0,0,1,0,0,0,0] err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } thrust::device_ptr<int> dev_ptr = thrust::device_pointer_cast(w_pointer1); // This modifies w_pointer1, now it indicates the local maximum indexes. Format: 0,0,0,1,1,1,1,2,2,2,... First maximum: 2, second: 6, etc... thrust::exclusive_scan(dev_ptr, dev_ptr + offset, dev_ptr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2] // This returns dst, with the NMS applied over it writeResultKernel<<<updiv2(offset,NUMBER_THREADS_PER_BLOCK), NUMBER_THREADS_PER_BLOCK, 0, stream>>>( offset, w_pointer1, src, dst, height, width, max_peaks); err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } } } //#ifdef __cplusplus //} //#endif
f664c19861df45a88d94c76a115bad0e97ffd809.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <THH/THHGeneral.h> #include <THH/THHTensorRandom.h> #include <THH/THHGenerator.hpp> #include <THH/THHApply.cuh> #include <THH/THHDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in hiprand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, hiprandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int hiprand(hiprandStatePhilox4_32_10_t *state) * in hiprand/hiprand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using hiprand distributions that utilize hiprand4 call. hiprand4 call doesn't have the * register spilling problem. */ THCGenerator* THCRandom_getGenerator(THCState* state); namespace { // Increment should be at least the number of hiprand() random numbers used in // each thread. It is the user's responsibility to make sure that the increment for philox is never // smaller than the number of hiprand() calls. Increment value > the number of hiprand() calls // won't harm but anything less would mean that you would be reusing random values from // previous calls. // e.g. 
In many kernels below, we use distributions that utilize hiprand4 call in the kernel. // Hence, increment value should be at least 4 for those kernels. std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) { auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState()); uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment); return std::make_pair(gen_->state.initial_seed, offset); } // launch bounds used for kernels utilizing TensorIterator const uint32_t block_size_bound = 256; const uint32_t grid_size_bound = 4; // number of randoms given by distributions like hiprand_uniform4, hiprand_uniform2_double // used in calculating philox offset. const uint32_t curand4_engine_calls = 4; // utility function that calculates proper philox_offset // for distributions utilizing TensorIterator. For distributions using // TensorIterator, we are using a grid-stride loop with each // thread yielding one element per thread. For the edge of the grid-stride // loop, if the tensor size is large, the unroll loop will kick in and the float4 // from hiprand4 will start getting utilized (for common tensor sizes, we end up // using rand.x from each thread). Hence, the philox_offset is // (number of elements per thread * number of engine calls), which makes // sure that philox offset increment is not less than the number of randoms used // in each thread. 
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) { const uint64_t numel = static_cast<uint64_t>(total_elements); const uint32_t block_size = block_size_bound; const uint32_t unroll = curand4_engine_calls; dim3 dim_block(block_size); dim3 grid((numel + block_size - 1) / block_size); uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size; grid.x = ::min( static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1) * curand4_engine_calls; return std::make_tuple(counter_offset, grid, dim_block); } // grid stride loop kernel for distributions template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t> C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound) __global__ void distribution_elementwise_grid_stride_kernel(int numel, std::pair<uint64_t, uint64_t> seeds, const dist_t dist_func, const transform_t transform_func) { int idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, idx, seeds.second, &state); int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) * blockDim.x * gridDim.x * unroll_factor; for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) { auto rand = dist_func(&state); #pragma unroll for (int ii = 0; ii < unroll_factor; ii++) { int li = linear_index + blockDim.x * gridDim.x * ii; if (li < numel) { transform_func(li, static_cast<accscalar_t>((&rand.x)[ii])); } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t> void distribution_nullary_kernel(at::TensorIterator& iter, at::Generator* gen, const dist_t& dist_func, const 
transform_t transform_func) { static_assert(unroll_factor >= 1, "unroll_factor must be >= 1."); int64_t numel = iter.numel(); if (numel == 0) { return; } auto execution_policy = calc_execution_policy(numel); auto counter_offset = std::get<0>(execution_policy); auto grid = std::get<1>(execution_policy); auto block = std::get<2>(execution_policy); auto seeds = next_philox_seed(gen, counter_offset); if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter, gen, dist_func, transform_func); } return; } char* out_data = (char*)iter.data_ptr(0); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (iter.is_trivial_1d()) { auto strides = iter.get_inner_strides(); int stride0 = strides[0]; hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream, numel, seeds, dist_func, [=]__device__(int idx, accscalar_t rand) { scalar_t* out = (scalar_t*)&out_data[stride0 * idx]; *out = transform_func(rand); } ); } else { auto offset_calc = at::native::make_offset_calculator<1>(iter); hipLaunchKernelGGL(( distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor>), dim3(grid), dim3(block), 0, stream, numel, seeds, dist_func, [=]__device__(int idx, accscalar_t rand) { auto offsets = offset_calc.get(idx); scalar_t* out = (scalar_t*)&out_data[offsets[0]]; *out = transform_func(rand); } ); } AT_CUDA_CHECK(hipGetLastError()); } template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(hiprand_poisson(&state, lambda)); }); } template 
<typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return hiprand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return hiprand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template <typename scalar_t> void gamma_grad_cuda_kernel( at::Tensor& ret, const at::Tensor& self, const at::Tensor& output) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>( ret, self, output, [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) { ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); } template<typename scalar_t, typename prob_t> void bernoulli_tensor_cuda_kernel( at::Tensor& ret, const at::Tensor& p, std::pair<uint64_t, uint64_t> seeds) { // The template argument `4` below indicates that we want to operate on four // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details. 
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>( ret, p, [seeds] __device__( int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4, const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); // See Note [Register spilling in hiprand call for CUDA < 10] float4 rand = hiprand_uniform4(&state); switch (n) { case 4: { assert(0 <= p4 && p4 <= 1); v4 = static_cast<scalar_t>(rand.w <= p4); // fallthrough } case 3: { assert(0 <= p3 && p3 <= 1); v3 = static_cast<scalar_t>(rand.z <= p3); // fallthrough } case 2: { assert(0 <= p2 && p2 <= 1); v2 = static_cast<scalar_t>(rand.y <= p2); // fallthrough } case 1: { assert(0 <= p1 && p1 <= 1); v1 = static_cast<scalar_t>(rand.x <= p1); } } } ); } template<typename scalar_t> void bernoulli_scalar_cuda_kernel( at::Tensor& ret, double p_, std::pair<uint64_t, uint64_t> seeds) { float p = static_cast<float>(p_); // The template argument `4` below indicates that we want to operate on four // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details. 
at::cuda::CUDA_tensor_apply1<scalar_t, 4>( ret, [seeds, p] __device__( int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) { hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); // See Note [Register spilling in hiprand call for CUDA < 10] float4 rand = hiprand_uniform4(&state); switch (n) { case 4: { v4 = static_cast<scalar_t>(rand.w <= p); // fallthrough } case 3: { v3 = static_cast<scalar_t>(rand.z <= p); // fallthrough } case 2: { v2 = static_cast<scalar_t>(rand.y <= p); // fallthrough } case 1: { v1 = static_cast<scalar_t>(rand.x <= p); } } } ); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes()); at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum, [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) { ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) { Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20)); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) { Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10)); }); return ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen) { Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, next_philox_seed(gen, 10)); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] { gamma_grad_cuda_kernel<scalar_t>(ret, self, output); }); return ret; } Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) { auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA))); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] { using self_t = scalar_t; auto seeds = next_philox_seed(gen, 10); AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] { using p_t = scalar_t; return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds); }); }); return self; } Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) { AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p); 
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "bernoulli_scalar_cuda_", [&] { auto seeds = next_philox_seed(gen, 10); bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds); }); return self; } void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] { auto from = static_cast<scalar_t>(from_); auto to = static_cast<scalar_t>(to_); AT_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to); AT_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(), "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()), ">::max(), but found to=", to, " and from=", from, " which result in to-from to exceed the limit"); using accscalar_t = at::acc_type<scalar_t, true>; auto range = static_cast<accscalar_t>(to-from); from = static_cast<accscalar_t>(from); // define lambda to reverse bounds, multiply 'range' and add 'from_' auto uniform_func = [range, from] __device__ (accscalar_t rand) { // reverse the bounds of hiprand4 from (0, 1] to [0, 1) // Note that this method is from legacy THCTensorRandom and is likely to give // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s. auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? 
static_cast<accscalar_t>(0.0) : rand; return static_cast<scalar_t>(reverse_bound_rand * range + from); }; if (std::is_same<scalar_t, double>::value) { distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter, gen, [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform2_double(state); }, uniform_func); } else { distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, [] __device__ (hiprandStatePhilox4_32_10_t* state) { return hiprand_uniform4(state); }, uniform_func); } }); } Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) { auto iter = TensorIterator::nullary_op(self); uniform_kernel_cuda(*iter, from, to, gen); return self; } }} // namespace at::native
f664c19861df45a88d94c76a115bad0e97ffd809.cu
#include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <curand.h> #include <curand_kernel.h> #include <curand_philox4x32_x.h> #include <utility> #include <functional> #include <ATen/native/Distributions.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <THC/THCGeneral.h> #include <THC/THCTensorRandom.h> #include <THC/THCGenerator.hpp> #include <THC/THCApply.cuh> #include <THC/THCDeviceUtils.cuh> #include <cstdint> #include <limits> #include <utility> #include <type_traits> /** * Note [Register spilling in curand call for CUDA < 10] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * For CUDA < 10, curandStatePhilox4_32_10_t engine achieves poor performance (60% SOL bandwidth) * when called to generate one random number at a time. This is because the line * unsigned ret = (&state->output.x)[state->STATE++]; * in * QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) * in curand_kernel.h dynamically indexes into state.output, preventing the compiler from ever * storing state.output in registers. * * CUDA 10 fixed this problem. However, for backwards compatibility, in the following kernels * we are using curand distributions that utilize curand4 call. curand4 call doesn't have the * register spilling problem. */ THCGenerator* THCRandom_getGenerator(THCState* state); namespace { // Increment should be at least the number of curand() random numbers used in // each thread. It is the user's responsibility to make sure that the increment for philox is never // smaller than the number of curand() calls. Increment value > the number of curand() calls // won't harm but anything less would mean that you would be reusing random values from // previous calls. // e.g. In many kernels below, we use distributions that utilize curand4 call in the kernel. 
// Hence, increment value should be at least 4 for those kernels. std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) { auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState()); uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment); return std::make_pair(gen_->state.initial_seed, offset); } // launch bounds used for kernels utilizing TensorIterator const uint32_t block_size_bound = 256; const uint32_t grid_size_bound = 4; // number of randoms given by distributions like curand_uniform4, curand_uniform2_double // used in calculating philox offset. const uint32_t curand4_engine_calls = 4; // utility function that calculates proper philox_offset // for distributions utilizing TensorIterator. For distributions using // TensorIterator, we are using a grid-stride loop with each // thread yielding one element per thread. For the edge of the grid-stride // loop, if the tensor size is large, the unroll loop will kick in and the float4 // from curand4 will start getting utilized (for common tensor sizes, we end up // using rand.x from each thread). Hence, the philox_offset is // (number of elements per thread * number of engine calls), which makes // sure that philox offset increment is not less than the number of randoms used // in each thread. 
std::tuple<uint64_t, dim3, dim3> calc_execution_policy(int64_t total_elements) { const uint64_t numel = static_cast<uint64_t>(total_elements); const uint32_t block_size = block_size_bound; const uint32_t unroll = curand4_engine_calls; dim3 dim_block(block_size); dim3 grid((numel + block_size - 1) / block_size); uint32_t blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor / block_size; grid.x = std::min( static_cast<uint32_t>(at::cuda::getCurrentDeviceProperties()->multiProcessorCount) * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state uint64_t counter_offset = ((numel - 1) / (block_size * grid.x * unroll) + 1) * curand4_engine_calls; return std::make_tuple(counter_offset, grid, dim_block); } // grid stride loop kernel for distributions template<typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t> C10_LAUNCH_BOUNDS_2(block_size_bound, grid_size_bound) __global__ void distribution_elementwise_grid_stride_kernel(int numel, std::pair<uint64_t, uint64_t> seeds, const dist_t dist_func, const transform_t transform_func) { int idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init( seeds.first, idx, seeds.second, &state); int rounded_size = ((numel - 1)/(blockDim.x * gridDim.x * unroll_factor)+1) * blockDim.x * gridDim.x * unroll_factor; for(int linear_index = idx; linear_index < rounded_size; linear_index += blockDim.x * gridDim.x * unroll_factor) { auto rand = dist_func(&state); #pragma unroll for (int ii = 0; ii < unroll_factor; ii++) { int li = linear_index + blockDim.x * gridDim.x * ii; if (li < numel) { transform_func(li, static_cast<accscalar_t>((&rand.x)[ii])); } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t, int unroll_factor, typename dist_t, typename transform_t> void distribution_nullary_kernel(at::TensorIterator& iter, at::Generator* gen, const dist_t& dist_func, 
const transform_t transform_func) { static_assert(unroll_factor >= 1, "unroll_factor must be >= 1."); int64_t numel = iter.numel(); if (numel == 0) { return; } auto execution_policy = calc_execution_policy(numel); auto counter_offset = std::get<0>(execution_policy); auto grid = std::get<1>(execution_policy); auto block = std::get<2>(execution_policy); auto seeds = next_philox_seed(gen, counter_offset); if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { distribution_nullary_kernel<scalar_t, accscalar_t, unroll_factor>(sub_iter, gen, dist_func, transform_func); } return; } char* out_data = (char*)iter.data_ptr(0); auto stream = at::cuda::getCurrentCUDAStream(); if (iter.is_trivial_1d()) { auto strides = iter.get_inner_strides(); int stride0 = strides[0]; distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>( numel, seeds, dist_func, [=]__device__(int idx, accscalar_t rand) { scalar_t* out = (scalar_t*)&out_data[stride0 * idx]; *out = transform_func(rand); } ); } else { auto offset_calc = at::native::make_offset_calculator<1>(iter); distribution_elementwise_grid_stride_kernel<accscalar_t, unroll_factor><<<grid, block, 0, stream>>>( numel, seeds, dist_func, [=]__device__(int idx, accscalar_t rand) { auto offsets = offset_calc.get(idx); scalar_t* out = (scalar_t*)&out_data[offsets[0]]; *out = transform_func(rand); } ); } AT_CUDA_CHECK(cudaGetLastError()); } template <typename scalar_t> void poisson_cuda_kernel( at::Tensor& ret, const at::Tensor& lambda, std::pair<uint64_t, uint64_t> seeds) { at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, lambda, [seeds] __device__( scalar_t & ret_val, const scalar_t& lambda) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); ret_val = static_cast<scalar_t>(curand_poisson(&state, lambda)); }); } template <typename scalar_t> void gamma_cuda_kernel( at::Tensor& ret, const 
at::Tensor& alpha, std::pair<uint64_t, uint64_t> seeds) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>( ret, alpha, [seeds] __device__( scalar_t & ret_val, const scalar_t& alpha) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); auto uniform_lambda = [&state] __device__ () { return curand_uniform(&state); }; BaseSampler<accscalar_t, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state] __device__ () { return curand_normal(&state); }; BaseSampler<accscalar_t, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<scalar_t, accscalar_t, decltype(uniform_lambda), decltype(normal_lambda)>(alpha, standard_uniform, standard_normal); auto min_value = std::numeric_limits<scalar_t>::min(); ret_val = (min_value > sample) ? min_value : sample; }); } template <typename scalar_t> void gamma_grad_cuda_kernel( at::Tensor& ret, const at::Tensor& self, const at::Tensor& output) { using accscalar_t = at::acc_type<scalar_t, true>; at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>( ret, self, output, [] __device__ (scalar_t& ret_val, const scalar_t& self_val, const scalar_t &output_val) { ret_val = standard_gamma_grad_one<scalar_t, accscalar_t>(self_val, output_val); }); } template<typename scalar_t, typename prob_t> void bernoulli_tensor_cuda_kernel( at::Tensor& ret, const at::Tensor& p, std::pair<uint64_t, uint64_t> seeds) { // The template argument `4` below indicates that we want to operate on four // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details. 
at::cuda::CUDA_tensor_apply2<scalar_t, prob_t, 4>( ret, p, [seeds] __device__( int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4, const prob_t& p1, const prob_t& p2, const prob_t& p3, const prob_t& p4) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); // See Note [Register spilling in curand call for CUDA < 10] float4 rand = curand_uniform4(&state); switch (n) { case 4: { assert(0 <= p4 && p4 <= 1); v4 = static_cast<scalar_t>(rand.w <= p4); // fallthrough } case 3: { assert(0 <= p3 && p3 <= 1); v3 = static_cast<scalar_t>(rand.z <= p3); // fallthrough } case 2: { assert(0 <= p2 && p2 <= 1); v2 = static_cast<scalar_t>(rand.y <= p2); // fallthrough } case 1: { assert(0 <= p1 && p1 <= 1); v1 = static_cast<scalar_t>(rand.x <= p1); } } } ); } template<typename scalar_t> void bernoulli_scalar_cuda_kernel( at::Tensor& ret, double p_, std::pair<uint64_t, uint64_t> seeds) { float p = static_cast<float>(p_); // The template argument `4` below indicates that we want to operate on four // element at each time. See NOTE [ CUDA_tensor_applyN helpers ] for details. 
at::cuda::CUDA_tensor_apply1<scalar_t, 4>( ret, [seeds, p] __device__( int n, scalar_t& v1, scalar_t& v2, scalar_t& v3, scalar_t& v4) { curandStatePhilox4_32_10_t state; curand_init( seeds.first, blockIdx.x * blockDim.x + threadIdx.x, seeds.second, &state); // See Note [Register spilling in curand call for CUDA < 10] float4 rand = curand_uniform4(&state); switch (n) { case 4: { v4 = static_cast<scalar_t>(rand.w <= p); // fallthrough } case 3: { v3 = static_cast<scalar_t>(rand.z <= p); // fallthrough } case 2: { v2 = static_cast<scalar_t>(rand.y <= p); // fallthrough } case 1: { v1 = static_cast<scalar_t>(rand.x <= p); } } } ); } template<typename scalar_t> void dirichlet_scalar_cuda_kernel( at::Tensor& ret, const at::Tensor& gamma) { auto gamma_sum = gamma.sum(-1, true).expand(ret.sizes()); at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>(ret, gamma, gamma_sum, [] __device__(scalar_t &ret_val, const scalar_t &gamma, const scalar_t &gamma_sum) { ret_val = gamma / gamma_sum; auto min_value = std::numeric_limits<scalar_t>::min(); auto max_value = 1 - std::numeric_limits<scalar_t>::epsilon(); ret_val = (min_value > ret_val) ? min_value : ret_val; ret_val = (max_value < ret_val) ? 
max_value : ret_val; }); } } // namespace namespace at { namespace native { Tensor _s_poisson_cuda(const Tensor& lambda, Generator* gen) { Tensor ret = at::empty(lambda.sizes(), lambda.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "poisson_cuda", [&] { poisson_cuda_kernel<scalar_t>(ret, lambda, next_philox_seed(gen, 20)); }); return ret; } Tensor _s_gamma_cuda(const Tensor& alpha, Generator* gen) { Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "gamma_cuda", [&] { gamma_cuda_kernel<scalar_t>(ret, alpha, next_philox_seed(gen, 10)); }); return ret; } Tensor _s_dirichlet_cuda(const Tensor& alpha, Generator* gen) { Tensor ret = at::empty(alpha.sizes(), alpha.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "dirichlet", [&] { Tensor gamma = at::empty(alpha.sizes(), alpha.options()); gamma_cuda_kernel<scalar_t>(gamma, alpha, next_philox_seed(gen, 10)); dirichlet_scalar_cuda_kernel<scalar_t>(ret, gamma); }); return ret; } Tensor _standard_gamma_grad_cuda(const Tensor& self, const Tensor& output) { Tensor ret = at::empty(self.sizes(), self.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "_standard_gamma_grad_cuda", [&] { gamma_grad_cuda_kernel<scalar_t>(ret, self, output); }); return ret; } Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) { auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA))); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] { using self_t = scalar_t; auto seeds = next_philox_seed(gen, 10); AT_DISPATCH_FLOATING_TYPES_AND_HALF(p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] { using p_t = scalar_t; return bernoulli_tensor_cuda_kernel<self_t, p_t>(self, p, seeds); }); }); return self; } Tensor& bernoulli_scalar_cuda_(Tensor &self, double p, Generator* gen) { AT_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p); 
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), "bernoulli_scalar_cuda_", [&] { auto seeds = next_philox_seed(gen, 10); bernoulli_scalar_cuda_kernel<scalar_t>(self, p, seeds); }); return self; } void uniform_kernel_cuda(TensorIterator& iter, double from_, double to_, Generator* gen) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] { auto from = static_cast<scalar_t>(from_); auto to = static_cast<scalar_t>(to_); AT_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to); AT_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(), "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()), ">::max(), but found to=", to, " and from=", from, " which result in to-from to exceed the limit"); using accscalar_t = at::acc_type<scalar_t, true>; auto range = static_cast<accscalar_t>(to-from); from = static_cast<accscalar_t>(from); // define lambda to reverse bounds, multiply 'range' and add 'from_' auto uniform_func = [range, from] __device__ (accscalar_t rand) { // reverse the bounds of curand4 from (0, 1] to [0, 1) // Note that this method is from legacy THCTensorRandom and is likely to give // you more 0-s, since, the probability of gettings 1-s is higher than 0-s and // by reversing the bounds, we are flipping the probabilities of 1-s and 0-s. auto reverse_bound_rand = rand == static_cast<accscalar_t>(1.0) ? 
static_cast<accscalar_t>(0.0) : rand; return static_cast<scalar_t>(reverse_bound_rand * range + from); }; if (std::is_same<scalar_t, double>::value) { distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls/2>(iter, gen, [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform2_double(state); }, uniform_func); } else { distribution_nullary_kernel<scalar_t, accscalar_t, curand4_engine_calls>(iter, gen, [] __device__ (curandStatePhilox4_32_10_t* state) { return curand_uniform4(state); }, uniform_func); } }); } Tensor& uniform_cuda_(Tensor& self, double from, double to, Generator* gen) { auto iter = TensorIterator::nullary_op(self); uniform_kernel_cuda(*iter, from, to, gen); return self; } }} // namespace at::native
c77ba6415229ab73db4335faf51dd16302f97830.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <math.h> #include "chTimer.h" #include <iostream> #include <fstream> __global__ void NullKernel() { } int main() { const int cIterations = 10000; printf( "Measuring launch time... 
\n" ); fflush( stdout ); std::ofstream myfile; //output for plots in file launchgrid.txt myfile.open ("launchgrid.txt", std::ios::trunc); //Otherwise for some reason the first measured value is wrong hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); //loop through different gridsizes for ( int j = 0; j<15; j++) { int NumBlocks = pow(2,j); myfile << NumBlocks << "\t"; chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(NumBlocks),dim3(1), 0, 0, ); } hipDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf("Grid size %i : \t \t", NumBlocks); printf( "asynchronous: %.2f us \t", usPerLaunch ); myfile << usPerLaunch << "\t"; } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(NumBlocks),dim3(1), 0, 0, ); hipDeviceSynchronize(); } hipDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "synchronous: %.2f us\n", usPerLaunch ); myfile << usPerLaunch << "\n"; } } myfile.close(); //output for plots in file launchblock.txt myfile.open ("launchblock.txt", std::ios::trunc); //loop through different blocksizes for ( int j = 0; j<11; j++) { int threadsPerBlock = pow(2,j); myfile << threadsPerBlock << "\t"; chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(threadsPerBlock), 0, 0, ); //hipDeviceSynchronize(); } hipDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf("Block size %i : \t \t", threadsPerBlock); printf( "asynchronous: %.2f us\t", usPerLaunch ); 
myfile << usPerLaunch << "\t"; } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(threadsPerBlock), 0, 0, ); hipDeviceSynchronize(); } hipDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "asynchronous: %.2f us\n", usPerLaunch ); myfile << usPerLaunch << "\n"; } } myfile.close(); return 0; }
c77ba6415229ab73db4335faf51dd16302f97830.cu
/* * * nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <math.h> #include "chTimer.h" #include <iostream> #include <fstream> __global__ void NullKernel() { } int main() { const int cIterations = 10000; printf( "Measuring launch time... 
\n" ); fflush( stdout ); std::ofstream myfile; //output for plots in file launchgrid.txt myfile.open ("launchgrid.txt", std::ios::trunc); //Otherwise for some reason the first measured value is wrong NullKernel<<<1,1>>>(); cudaDeviceSynchronize(); //loop through different gridsizes for ( int j = 0; j<15; j++) { int NumBlocks = pow(2,j); myfile << NumBlocks << "\t"; chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<NumBlocks,1>>>(); } cudaDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf("Grid size %i : \t \t", NumBlocks); printf( "asynchronous: %.2f us \t", usPerLaunch ); myfile << usPerLaunch << "\t"; } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<NumBlocks,1>>>(); cudaDeviceSynchronize(); } cudaDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "synchronous: %.2f us\n", usPerLaunch ); myfile << usPerLaunch << "\n"; } } myfile.close(); //output for plots in file launchblock.txt myfile.open ("launchblock.txt", std::ios::trunc); //loop through different blocksizes for ( int j = 0; j<11; j++) { int threadsPerBlock = pow(2,j); myfile << threadsPerBlock << "\t"; chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<1,threadsPerBlock>>>(); //cudaDeviceSynchronize(); } cudaDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf("Block size %i : \t \t", threadsPerBlock); printf( "asynchronous: %.2f us\t", usPerLaunch ); myfile << usPerLaunch << "\t"; } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<1,threadsPerBlock>>>(); 
cudaDeviceSynchronize(); } cudaDeviceSynchronize(); chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "asynchronous: %.2f us\n", usPerLaunch ); myfile << usPerLaunch << "\n"; } } myfile.close(); return 0; }
67a8aae7a8e1afe3f4e58e815cf6e5bb626b7413.hip
// !!! This is a file automatically generated by hipify!!! #include<iostream> #include<fstream> #include<sstream> #include<string> #include<vector> #include<mpi.h> #define __MVM_MULTIGPU_TEST__ #define MPI_NODE_PER_EDGE 4 #define EXACT_SOLUTION_NO 5 #include<../validation_tests/analytical_solutions.hpp> #include<sipg_sem_2d_multigpu.hpp> #include<iomanip> #include<CUDA_TIMER.hpp> int main(int argc, char** argv) { MPI_Init(&argc, &argv); int pid, nprocs; MPI_Comm CartComm; MPI_Comm_rank(MPI_COMM_WORLD, &pid); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if(pid == 0) { std::cerr<<"EXACT_SOLUTION_NO "<<EXACT_SOLUTION_NO<<std::endl; std::cerr<<"MPI_NODE_PER_EDGE "<<MPI_NODE_PER_EDGE<<std::endl; #ifdef USE_MODE_MATRIX std::cout<<"USE_MODE_MATRIX is ON"<<std::endl; #endif } int dims[3] = {MPI_NODE_PER_EDGE, MPI_NODE_PER_EDGE, 1}; int period[3] = {0, 0, 0}; MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm); int coords[3] = {0, 0, 0}; MPI_Cart_get(CartComm, 3, dims, period, coords); int degree = 8; // for (int degree = 4; degree < 9; degree*=2) { // const int dim = 1024; for (int dim = 128; dim < 1025; dim*=2) { CUDA_TIMER t; using namespace test_func; square_mesh_multigpu<double> sq_mesh( dim, MPI_NODE_PER_EDGE, coords[0], coords[1] ); sipg_sem_2d_multigpu<double> p(CartComm, degree, sq_mesh, f, u_ex, dx_u_ex, dy_u_ex, 1e-15); p._mvm ( p.d_rhs ); t.start(); for(int t=0; t < 20; ++t) { p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); } t.stop(); float mean_time(0); float local_time = t.elapsed_millisecs()/100.0; MPI_Allreduce(&local_time, &mean_time, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD); mean_time = mean_time / (MPI_NODE_PER_EDGE*MPI_NODE_PER_EDGE); if(pid == 0) { std::cerr<<MPI_NODE_PER_EDGE*MPI_NODE_PER_EDGE<<"\t"; std::cerr<<MPI_NODE_PER_EDGE*dim<<"\t"<<degree<<"\t"; std::cerr<<dim<<"\t"; std::cerr<<mean_time; std::cerr<<std::endl; } sq_mesh.device_info.free(); MPI_Barrier(MPI_COMM_WORLD); } if (pid == 0) 
std::cerr<<std::endl; } #if 0 hipError_t error = hipGetLastError(); std::string lastError = hipGetErrorString(error); std::cout<<lastError<<std::endl; #endif MPI_Finalize(); return 0; }
67a8aae7a8e1afe3f4e58e815cf6e5bb626b7413.cu
#include<iostream> #include<fstream> #include<sstream> #include<string> #include<vector> #include<mpi.h> #define __MVM_MULTIGPU_TEST__ #define MPI_NODE_PER_EDGE 4 #define EXACT_SOLUTION_NO 5 #include<../validation_tests/analytical_solutions.hpp> #include<sipg_sem_2d_multigpu.hpp> #include<iomanip> #include<CUDA_TIMER.hpp> int main(int argc, char** argv) { MPI_Init(&argc, &argv); int pid, nprocs; MPI_Comm CartComm; MPI_Comm_rank(MPI_COMM_WORLD, &pid); MPI_Comm_size(MPI_COMM_WORLD, &nprocs); if(pid == 0) { std::cerr<<"EXACT_SOLUTION_NO "<<EXACT_SOLUTION_NO<<std::endl; std::cerr<<"MPI_NODE_PER_EDGE "<<MPI_NODE_PER_EDGE<<std::endl; #ifdef USE_MODE_MATRIX std::cout<<"USE_MODE_MATRIX is ON"<<std::endl; #endif } int dims[3] = {MPI_NODE_PER_EDGE, MPI_NODE_PER_EDGE, 1}; int period[3] = {0, 0, 0}; MPI_Cart_create(MPI_COMM_WORLD, 3, dims, period, false, &CartComm); int coords[3] = {0, 0, 0}; MPI_Cart_get(CartComm, 3, dims, period, coords); int degree = 8; // for (int degree = 4; degree < 9; degree*=2) { // const int dim = 1024; for (int dim = 128; dim < 1025; dim*=2) { CUDA_TIMER t; using namespace test_func; square_mesh_multigpu<double> sq_mesh( dim, MPI_NODE_PER_EDGE, coords[0], coords[1] ); sipg_sem_2d_multigpu<double> p(CartComm, degree, sq_mesh, f, u_ex, dx_u_ex, dy_u_ex, 1e-15); p._mvm ( p.d_rhs ); t.start(); for(int t=0; t < 20; ++t) { p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); p._mvm ( p.d_rhs ); } t.stop(); float mean_time(0); float local_time = t.elapsed_millisecs()/100.0; MPI_Allreduce(&local_time, &mean_time, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD); mean_time = mean_time / (MPI_NODE_PER_EDGE*MPI_NODE_PER_EDGE); if(pid == 0) { std::cerr<<MPI_NODE_PER_EDGE*MPI_NODE_PER_EDGE<<"\t"; std::cerr<<MPI_NODE_PER_EDGE*dim<<"\t"<<degree<<"\t"; std::cerr<<dim<<"\t"; std::cerr<<mean_time; std::cerr<<std::endl; } sq_mesh.device_info.free(); MPI_Barrier(MPI_COMM_WORLD); } if (pid == 0) std::cerr<<std::endl; } #if 0 cudaError_t error = 
cudaGetLastError(); std::string lastError = cudaGetErrorString(error); std::cout<<lastError<<std::endl; #endif MPI_Finalize(); return 0; }
d001e22e22218439f57597eb3e788f77683bbb79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define NUM_THREADS SCAN_NUM_THREADS #define VALUES_PER_THREAD SCAN_VALUES_PER_THREAD #define BLOCKS_PER_SM SCAN_BLOCKS_PER_SM #define NUM_WARPS (NUM_THREADS / WARP_SIZE) #define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS) #define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD) #define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD) //////////////////////////////////////////////////////////////////////////////// // Multiscan utility function. Used in the first and third passes of the // global scan function. Returns the inclusive scan of the arguments in .x and // the sum of all arguments in .y. // Each warp is passed a pointer to its own contiguous area of shared memory. // There must be at least 48 slots of memory. They should also be aligned so // that the difference between the start of consecutive warps differ by an // interval that is relatively prime to 32 (any odd number will do). //////////////////////////////////////////////////////////////////////////////// // GlobalScanUpsweep adds up all the values in elements_global within the // range given by blockCount and writes to blockTotals_global[blockIdx.x]. template<class predicate> __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__ void CopyIfUpsweep(uint* blockTotals_global, const int2* range_global, predicate pred) { uint block = blockIdx.x; uint tid = threadIdx.x; int2 range = range_global[block]; // Loop through all elements in the interval, adding up values. // There is no need to synchronize until we perform the multiscan. uint sum = 0; for(uint index = range.x + tid; index < range.y; index += 2 * NUM_THREADS) { uint val = 0; if((index + NUM_THREADS) < range.y) val = pred(index + NUM_THREADS); sum += pred(index) + val; } // A full multiscan is unnecessary here - we really only need the total. // But this is easy and won't slow us down since this kernel is already // bandwidth limited. 
uint total = Multiscan2<NUM_WARPS>(tid, sum).y; if(!tid) blockTotals_global[block] = total; } //////////////////////////////////////////////////////////////////////////////// // GlobalScanDownsweep runs an exclusive scan on the same interval of data as in // pass 1, and adds blockScan_global[blockIdx.x] to each of them, writing back // out in-place. template<class predicate> __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__ void CopyIfDownsweep(uint* valuesOut_global, const uint* blockScan_global, const int2* range_global, int count, int inclusive, predicate pred) { uint block = blockIdx.x; uint tid = threadIdx.x; uint warp = tid / WARP_SIZE; uint lane = (WARP_SIZE - 1) & tid; uint index = VALUES_PER_WARP * warp + lane; uint blockScan = blockScan_global[block]; int2 range = range_global[block]; const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1); __shared__ volatile uint shared[Size]; // Use a stride of 33 slots per warp per value to allow conflict-free // transposes from strided to thread order. volatile uint* warpShared = shared + warp * VALUES_PER_THREAD * (WARP_SIZE + 1); volatile uint* threadShared = warpShared + lane; // Transpose values into thread order. uint offset = VALUES_PER_THREAD * lane; offset += offset / WARP_SIZE; while(range.x < range.y) { #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint source = range.x + index + i * WARP_SIZE; if(source < count) { uint x = pred(source); threadShared[i * (WARP_SIZE + 1)] = x; } else { threadShared[i * (WARP_SIZE + 1)] = 0; } } // Transpose into thread order by reading from transposeValues. // Compute the exclusive or inclusive scan of the thread values and // their sum. uint scan[VALUES_PER_THREAD]; uint sum = 0; #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = warpShared[offset + i]; scan[i] = sum; if(inclusive) scan[i] += x; sum += x; } // Multiscan for each thread's scan offset within the block. Subtract // sum to make it an exclusive scan. 
uint2 localScan = Multiscan2<NUM_WARPS>(tid, sum); uint scanOffset = localScan.x + blockScan - sum; // Add the scan offset to each exclusive scan and put the values back // into the shared memory they came out of. #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = scan[i] + scanOffset; warpShared[offset + i] = x; } // Store the scan back to global memory. #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = threadShared[i * (WARP_SIZE + 1)]; uint target = range.x + index + i * WARP_SIZE; if(target < count) { valuesOut_global[target] = x; } } // Grab the last element of totals_shared, which was set in Multiscan. // This is the total for all the values encountered in this pass. blockScan += localScan.y; range.x += NUM_VALUES; __syncthreads(); // TODO: why !? } if(tid == 0) { valuesOut_global[count] = valuesOut_global[count - 1] + pred(count-1); } // if tid } #undef NUM_THREADS #undef NUM_WARPS #undef LOG_NUM_WARPS #undef BLOCKS_PER_SM #undef VALUES_PER_THREAD #undef VALUES_PER_WARP #undef NUM_VALUES
d001e22e22218439f57597eb3e788f77683bbb79.cu
#define NUM_THREADS SCAN_NUM_THREADS #define VALUES_PER_THREAD SCAN_VALUES_PER_THREAD #define BLOCKS_PER_SM SCAN_BLOCKS_PER_SM #define NUM_WARPS (NUM_THREADS / WARP_SIZE) #define LOG_NUM_WARPS LOG_BASE_2(NUM_WARPS) #define VALUES_PER_WARP (WARP_SIZE * VALUES_PER_THREAD) #define NUM_VALUES (NUM_THREADS * VALUES_PER_THREAD) //////////////////////////////////////////////////////////////////////////////// // Multiscan utility function. Used in the first and third passes of the // global scan function. Returns the inclusive scan of the arguments in .x and // the sum of all arguments in .y. // Each warp is passed a pointer to its own contiguous area of shared memory. // There must be at least 48 slots of memory. They should also be aligned so // that the difference between the start of consecutive warps differ by an // interval that is relatively prime to 32 (any odd number will do). //////////////////////////////////////////////////////////////////////////////// // GlobalScanUpsweep adds up all the values in elements_global within the // range given by blockCount and writes to blockTotals_global[blockIdx.x]. template<class predicate> __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__ void CopyIfUpsweep(uint* blockTotals_global, const int2* range_global, predicate pred) { uint block = blockIdx.x; uint tid = threadIdx.x; int2 range = range_global[block]; // Loop through all elements in the interval, adding up values. // There is no need to synchronize until we perform the multiscan. uint sum = 0; for(uint index = range.x + tid; index < range.y; index += 2 * NUM_THREADS) { uint val = 0; if((index + NUM_THREADS) < range.y) val = pred(index + NUM_THREADS); sum += pred(index) + val; } // A full multiscan is unnecessary here - we really only need the total. // But this is easy and won't slow us down since this kernel is already // bandwidth limited. 
uint total = Multiscan2<NUM_WARPS>(tid, sum).y; if(!tid) blockTotals_global[block] = total; } //////////////////////////////////////////////////////////////////////////////// // GlobalScanDownsweep runs an exclusive scan on the same interval of data as in // pass 1, and adds blockScan_global[blockIdx.x] to each of them, writing back // out in-place. template<class predicate> __launch_bounds__(NUM_THREADS, BLOCKS_PER_SM) __global__ void CopyIfDownsweep(uint* valuesOut_global, const uint* blockScan_global, const int2* range_global, int count, int inclusive, predicate pred) { uint block = blockIdx.x; uint tid = threadIdx.x; uint warp = tid / WARP_SIZE; uint lane = (WARP_SIZE - 1) & tid; uint index = VALUES_PER_WARP * warp + lane; uint blockScan = blockScan_global[block]; int2 range = range_global[block]; const int Size = NUM_WARPS * VALUES_PER_THREAD * (WARP_SIZE + 1); __shared__ volatile uint shared[Size]; // Use a stride of 33 slots per warp per value to allow conflict-free // transposes from strided to thread order. volatile uint* warpShared = shared + warp * VALUES_PER_THREAD * (WARP_SIZE + 1); volatile uint* threadShared = warpShared + lane; // Transpose values into thread order. uint offset = VALUES_PER_THREAD * lane; offset += offset / WARP_SIZE; while(range.x < range.y) { #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint source = range.x + index + i * WARP_SIZE; if(source < count) { uint x = pred(source); threadShared[i * (WARP_SIZE + 1)] = x; } else { threadShared[i * (WARP_SIZE + 1)] = 0; } } // Transpose into thread order by reading from transposeValues. // Compute the exclusive or inclusive scan of the thread values and // their sum. uint scan[VALUES_PER_THREAD]; uint sum = 0; #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = warpShared[offset + i]; scan[i] = sum; if(inclusive) scan[i] += x; sum += x; } // Multiscan for each thread's scan offset within the block. Subtract // sum to make it an exclusive scan. 
uint2 localScan = Multiscan2<NUM_WARPS>(tid, sum); uint scanOffset = localScan.x + blockScan - sum; // Add the scan offset to each exclusive scan and put the values back // into the shared memory they came out of. #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = scan[i] + scanOffset; warpShared[offset + i] = x; } // Store the scan back to global memory. #pragma unroll for(int i = 0; i < VALUES_PER_THREAD; ++i) { uint x = threadShared[i * (WARP_SIZE + 1)]; uint target = range.x + index + i * WARP_SIZE; if(target < count) { valuesOut_global[target] = x; } } // Grab the last element of totals_shared, which was set in Multiscan. // This is the total for all the values encountered in this pass. blockScan += localScan.y; range.x += NUM_VALUES; __syncthreads(); // TODO: why !? } if(tid == 0) { valuesOut_global[count] = valuesOut_global[count - 1] + pred(count-1); } // if tid } #undef NUM_THREADS #undef NUM_WARPS #undef LOG_NUM_WARPS #undef BLOCKS_PER_SM #undef VALUES_PER_THREAD #undef VALUES_PER_WARP #undef NUM_VALUES
c862f268d23ad8d7794646862b20463800df6e98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. 
extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. 
extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_tanh (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = tanh(x[id]); } }
c862f268d23ad8d7794646862b20463800df6e98.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. 
extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. 
extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_tanh (int n, double *result, double *x) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = tanh(x[id]); } }
11a69918acc1645f4a25aee9bcf78c1992a7572e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdio.h" #include <assert.h> #include <cusparse_v2.h> #include "interpolate.hh" #define gpuErrchk_here(ans) { gpuAssert_here((ans), __FILE__, __LINE__); } inline void gpuAssert_here(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define ERR_NE(X,Y) do { if ((X) != (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) #define CUDA_CALL(X) ERR_NE((X),hipSuccess) #define CUSPARSE_CALL(X) ERR_NE((X),HIPSPARSE_STATUS_SUCCESS) using namespace std; InterpArrayContainer * createInterpArrayContainer(size_t *numBytes, int num_arr, int num_points){ InterpArrayContainer *cpu_array_container; size_t InterpArrayContainer_size = sizeof(InterpArrayContainer); *numBytes = num_arr*InterpArrayContainer_size; cpu_array_container = (InterpArrayContainer*)malloc(*numBytes); for (int i=0; i<num_arr; i++){ gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].array), num_points*sizeof(double) )); gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_1), (num_points-1)*sizeof(double) )); gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_2), (num_points-1)*sizeof(double) )); gpuErrchk_here(hipMalloc( (void**)&(cpu_array_container[i].coeff_3), (num_points-1)*sizeof(double) )); } return cpu_array_container; //hipMalloc((void**)&gpu_array_container, *numBytes); } InterpArrayContainer * createInterpArrayContainer_gpu(size_t numBytes, InterpArrayContainer *cpu_array_container){ InterpArrayContainer *gpu_array_container; hipMalloc((void**)&gpu_array_container, numBytes); hipMemcpy(gpu_array_container, cpu_array_container, numBytes, hipMemcpyHostToDevice); return gpu_array_container; } void destroyInterpArrayContainer(InterpArrayContainer * gpu_array_container, 
InterpArrayContainer *cpu_array_container, int num_arr){ for (int i=0; i<num_arr; i++){ gpuErrchk_here(hipFree(cpu_array_container[i].array)); gpuErrchk_here(hipFree(cpu_array_container[i].coeff_1)); gpuErrchk_here(hipFree(cpu_array_container[i].coeff_2)); gpuErrchk_here(hipFree(cpu_array_container[i].coeff_3)); } gpuErrchk_here(hipFree(gpu_array_container)); free(cpu_array_container); } Interpolate::Interpolate(){ int pass = 0; } __host__ void Interpolate::alloc_arrays(int max_length_init, int num_arr){ gpuErrchk_here(hipMalloc(&d_B, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(hipMalloc(&d_dl, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(hipMalloc(&d_d, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(hipMalloc(&d_du, max_length_init*num_arr*sizeof(double))); } __device__ void prep_splines(int i, int length, double *b, double *ud, double *diag, double *ld, double *x, double *y){ double dx1, dx2, d, slope1, slope2; if (i == length - 1){ dx1 = x[length - 2] - x[length - 3]; dx2 = x[length - 1] - x[length - 2]; d = x[length - 1] - x[length - 3]; slope1 = (y[length - 2] - y[length - 3])/dx1; slope2 = (y[length - 1] - y[length - 2])/dx2; b[length - 1] = ((dx2*dx2*slope1 + (2*d + dx2)*dx1*slope2) / d); diag[length - 1] = dx1; ld[length - 1] = d; ud[length - 1] = 0.0; } else if (i == 0){ dx1 = x[1] - x[0]; dx2 = x[2] - x[1]; d = x[2] - x[0]; //amp slope1 = (y[1] - y[0])/dx1; slope2 = (y[2] - y[1])/dx2; b[0] = ((dx1 + 2*d) * dx2 * slope1 + dx1*dx1 * slope2) / d; diag[0] = dx2; ud[0] = d; ld[0] = 0.0; } else{ dx1 = x[i] - x[i-1]; dx2 = x[i+1] - x[i]; //amp slope1 = (y[i] - y[i-1])/dx1; slope2 = (y[i+1] - y[i])/dx2; b[i] = 3.0* (dx2*slope1 + dx1*slope2); diag[i] = 2*(dx1 + dx2); ud[i] = dx1; ld[i] = dx2; } } /* fill the B array on the GPU for response transfer functions. 
*/ __device__ void fill_B(InterpArrayContainer *array_container, double *B, double *tvals, double *upper_diag, double *diag, double *lower_diag, int length, int num_splines, int spline_i, int i){ int num_pars = 8; int lead_ind; // phaseRdelay lead_ind = spline_i*length; prep_splines(i, length, &B[lead_ind], &upper_diag[lead_ind], &diag[lead_ind], &lower_diag[lead_ind], tvals, array_container[spline_i].array); } __global__ void fill_B_wrap(InterpArrayContainer *array_container, double *tvals, double *B, double *upper_diag, double *diag, double *lower_diag, int length, int num_splines){ for (int spline_i = blockIdx.y * blockDim.y + threadIdx.y; spline_i < num_splines; spline_i += blockDim.y * gridDim.y){ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x){ fill_B(array_container, B, tvals, upper_diag, diag, lower_diag, length, num_splines, spline_i, i); } } } __device__ void fill_coefficients(int i, int length, double *dydx, double dx, double *y, double *coeff1, double *coeff2, double *coeff3){ double slope, t, dydx_i; slope = (y[i+1] - y[i])/dx; dydx_i = dydx[i]; t = (dydx_i + dydx[i+1] - 2*slope)/dx; coeff1[i] = dydx_i; coeff2[i] = (slope - dydx_i) / dx - t; coeff3[i] = t/dx; } /* find spline constants based on matrix solution for response transfer functions. 
*/ __device__ void set_spline_constants(InterpArrayContainer *array_container, double *B, int length, int num_splines, int spline_i, int i, double dt){ int lead_ind; // phaseRdelay lead_ind = spline_i*length; fill_coefficients(i, length, &B[lead_ind], dt, array_container[spline_i].array, array_container[spline_i].coeff_1, array_container[spline_i].coeff_2, array_container[spline_i].coeff_3); } __global__ void set_spline_constants_wrap(InterpArrayContainer *array_container, double *B, int length, int num_splines, double *tvals){ int num_pars = 8; int spline_index; double dt; for (int spline_i = blockIdx.y * blockDim.y + threadIdx.y; spline_i < num_splines; spline_i += blockDim.y * gridDim.y){ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length-1; i += blockDim.x * gridDim.x){ dt = tvals[i + 1] - tvals[i]; set_spline_constants(array_container, B, length, num_splines, spline_i, i, dt); } } } void fit_constants_serial_wrap(int m, int n, double *a, double *b, double *c, double *d_in){ void *pBuffer; hipsparseStatus_t stat; hipsparseHandle_t handle; size_t bufferSizeInBytes; CUSPARSE_CALL(hipsparseCreate(&handle)); CUSPARSE_CALL( hipsparseDgtsv2StridedBatch_bufferSizeExt(handle, m, a, b, c, d_in, n, m, &bufferSizeInBytes)); gpuErrchk_here(hipMalloc(&pBuffer, bufferSizeInBytes)); CUSPARSE_CALL(hipsparseDgtsv2StridedBatch(handle, m, a, // dl b, //diag c, // du d_in, n, m, pBuffer)); CUSPARSE_CALL(hipsparseDestroy(handle)); gpuErrchk_here(hipFree(pBuffer)); } void Interpolate::setup(InterpArrayContainer *array_container, double *d_tvec, int m_, int n_){ m = m_; n = n_; int NUM_THREADS = 256; int num_blocks = ::ceil((m + NUM_THREADS -1)/NUM_THREADS); dim3 interpGrid(num_blocks, n); hipLaunchKernelGGL(( fill_B_wrap), dim3(interpGrid), dim3(NUM_THREADS), 0, 0, array_container, d_tvec, d_B, d_du, d_d, d_dl, m, n); hipDeviceSynchronize(); gpuErrchk_here(hipGetLastError()); fit_constants_serial_wrap(m, n, d_dl, d_d, d_du, d_B); hipLaunchKernelGGL(( 
set_spline_constants_wrap), dim3(interpGrid), dim3(NUM_THREADS), 0, 0, array_container, d_B, m, n, d_tvec); hipDeviceSynchronize(); gpuErrchk_here(hipGetLastError()); } __host__ Interpolate::~Interpolate(){ hipFree(d_dl); hipFree(d_du); hipFree(d_d); hipFree(d_B); //delete[] d; //delete[] dl; //delete[] du; }
11a69918acc1645f4a25aee9bcf78c1992a7572e.cu
#include "stdio.h" #include <assert.h> #include <cusparse_v2.h> #include "interpolate.hh" #define gpuErrchk_here(ans) { gpuAssert_here((ans), __FILE__, __LINE__); } inline void gpuAssert_here(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define ERR_NE(X,Y) do { if ((X) != (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) #define CUDA_CALL(X) ERR_NE((X),cudaSuccess) #define CUSPARSE_CALL(X) ERR_NE((X),CUSPARSE_STATUS_SUCCESS) using namespace std; InterpArrayContainer * createInterpArrayContainer(size_t *numBytes, int num_arr, int num_points){ InterpArrayContainer *cpu_array_container; size_t InterpArrayContainer_size = sizeof(InterpArrayContainer); *numBytes = num_arr*InterpArrayContainer_size; cpu_array_container = (InterpArrayContainer*)malloc(*numBytes); for (int i=0; i<num_arr; i++){ gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].array), num_points*sizeof(double) )); gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_1), (num_points-1)*sizeof(double) )); gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_2), (num_points-1)*sizeof(double) )); gpuErrchk_here(cudaMalloc( (void**)&(cpu_array_container[i].coeff_3), (num_points-1)*sizeof(double) )); } return cpu_array_container; //cudaMalloc((void**)&gpu_array_container, *numBytes); } InterpArrayContainer * createInterpArrayContainer_gpu(size_t numBytes, InterpArrayContainer *cpu_array_container){ InterpArrayContainer *gpu_array_container; cudaMalloc((void**)&gpu_array_container, numBytes); cudaMemcpy(gpu_array_container, cpu_array_container, numBytes, cudaMemcpyHostToDevice); return gpu_array_container; } void destroyInterpArrayContainer(InterpArrayContainer * gpu_array_container, InterpArrayContainer *cpu_array_container, int num_arr){ for (int i=0; i<num_arr; i++){ 
gpuErrchk_here(cudaFree(cpu_array_container[i].array)); gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_1)); gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_2)); gpuErrchk_here(cudaFree(cpu_array_container[i].coeff_3)); } gpuErrchk_here(cudaFree(gpu_array_container)); free(cpu_array_container); } Interpolate::Interpolate(){ int pass = 0; } __host__ void Interpolate::alloc_arrays(int max_length_init, int num_arr){ gpuErrchk_here(cudaMalloc(&d_B, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(cudaMalloc(&d_dl, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(cudaMalloc(&d_d, max_length_init*num_arr*sizeof(double))); gpuErrchk_here(cudaMalloc(&d_du, max_length_init*num_arr*sizeof(double))); } __device__ void prep_splines(int i, int length, double *b, double *ud, double *diag, double *ld, double *x, double *y){ double dx1, dx2, d, slope1, slope2; if (i == length - 1){ dx1 = x[length - 2] - x[length - 3]; dx2 = x[length - 1] - x[length - 2]; d = x[length - 1] - x[length - 3]; slope1 = (y[length - 2] - y[length - 3])/dx1; slope2 = (y[length - 1] - y[length - 2])/dx2; b[length - 1] = ((dx2*dx2*slope1 + (2*d + dx2)*dx1*slope2) / d); diag[length - 1] = dx1; ld[length - 1] = d; ud[length - 1] = 0.0; } else if (i == 0){ dx1 = x[1] - x[0]; dx2 = x[2] - x[1]; d = x[2] - x[0]; //amp slope1 = (y[1] - y[0])/dx1; slope2 = (y[2] - y[1])/dx2; b[0] = ((dx1 + 2*d) * dx2 * slope1 + dx1*dx1 * slope2) / d; diag[0] = dx2; ud[0] = d; ld[0] = 0.0; } else{ dx1 = x[i] - x[i-1]; dx2 = x[i+1] - x[i]; //amp slope1 = (y[i] - y[i-1])/dx1; slope2 = (y[i+1] - y[i])/dx2; b[i] = 3.0* (dx2*slope1 + dx1*slope2); diag[i] = 2*(dx1 + dx2); ud[i] = dx1; ld[i] = dx2; } } /* fill the B array on the GPU for response transfer functions. 
*/ __device__ void fill_B(InterpArrayContainer *array_container, double *B, double *tvals, double *upper_diag, double *diag, double *lower_diag, int length, int num_splines, int spline_i, int i){ int num_pars = 8; int lead_ind; // phaseRdelay lead_ind = spline_i*length; prep_splines(i, length, &B[lead_ind], &upper_diag[lead_ind], &diag[lead_ind], &lower_diag[lead_ind], tvals, array_container[spline_i].array); } __global__ void fill_B_wrap(InterpArrayContainer *array_container, double *tvals, double *B, double *upper_diag, double *diag, double *lower_diag, int length, int num_splines){ for (int spline_i = blockIdx.y * blockDim.y + threadIdx.y; spline_i < num_splines; spline_i += blockDim.y * gridDim.y){ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += blockDim.x * gridDim.x){ fill_B(array_container, B, tvals, upper_diag, diag, lower_diag, length, num_splines, spline_i, i); } } } __device__ void fill_coefficients(int i, int length, double *dydx, double dx, double *y, double *coeff1, double *coeff2, double *coeff3){ double slope, t, dydx_i; slope = (y[i+1] - y[i])/dx; dydx_i = dydx[i]; t = (dydx_i + dydx[i+1] - 2*slope)/dx; coeff1[i] = dydx_i; coeff2[i] = (slope - dydx_i) / dx - t; coeff3[i] = t/dx; } /* find spline constants based on matrix solution for response transfer functions. 
*/ __device__ void set_spline_constants(InterpArrayContainer *array_container, double *B, int length, int num_splines, int spline_i, int i, double dt){ int lead_ind; // phaseRdelay lead_ind = spline_i*length; fill_coefficients(i, length, &B[lead_ind], dt, array_container[spline_i].array, array_container[spline_i].coeff_1, array_container[spline_i].coeff_2, array_container[spline_i].coeff_3); } __global__ void set_spline_constants_wrap(InterpArrayContainer *array_container, double *B, int length, int num_splines, double *tvals){ int num_pars = 8; int spline_index; double dt; for (int spline_i = blockIdx.y * blockDim.y + threadIdx.y; spline_i < num_splines; spline_i += blockDim.y * gridDim.y){ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length-1; i += blockDim.x * gridDim.x){ dt = tvals[i + 1] - tvals[i]; set_spline_constants(array_container, B, length, num_splines, spline_i, i, dt); } } } void fit_constants_serial_wrap(int m, int n, double *a, double *b, double *c, double *d_in){ void *pBuffer; cusparseStatus_t stat; cusparseHandle_t handle; size_t bufferSizeInBytes; CUSPARSE_CALL(cusparseCreate(&handle)); CUSPARSE_CALL( cusparseDgtsv2StridedBatch_bufferSizeExt(handle, m, a, b, c, d_in, n, m, &bufferSizeInBytes)); gpuErrchk_here(cudaMalloc(&pBuffer, bufferSizeInBytes)); CUSPARSE_CALL(cusparseDgtsv2StridedBatch(handle, m, a, // dl b, //diag c, // du d_in, n, m, pBuffer)); CUSPARSE_CALL(cusparseDestroy(handle)); gpuErrchk_here(cudaFree(pBuffer)); } void Interpolate::setup(InterpArrayContainer *array_container, double *d_tvec, int m_, int n_){ m = m_; n = n_; int NUM_THREADS = 256; int num_blocks = std::ceil((m + NUM_THREADS -1)/NUM_THREADS); dim3 interpGrid(num_blocks, n); fill_B_wrap<<<interpGrid, NUM_THREADS>>>(array_container, d_tvec, d_B, d_du, d_d, d_dl, m, n); cudaDeviceSynchronize(); gpuErrchk_here(cudaGetLastError()); fit_constants_serial_wrap(m, n, d_dl, d_d, d_du, d_B); set_spline_constants_wrap<<<interpGrid, NUM_THREADS>>>(array_container, d_B, 
m, n, d_tvec); cudaDeviceSynchronize(); gpuErrchk_here(cudaGetLastError()); } __host__ Interpolate::~Interpolate(){ cudaFree(d_dl); cudaFree(d_du); cudaFree(d_d); cudaFree(d_B); //delete[] d; //delete[] dl; //delete[] du; }
e84d41675fe6b7cb373034017029b8e5f137e882.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <THH/THH.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, 
scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, 
magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, 
magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void 
magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* 
info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { 
MagmaStreamSyncGuard guard; magma_spotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolve<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) { MagmaStreamSyncGuard guard; magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolve<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) { MagmaStreamSyncGuard guard; magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, 
magma_int_t n, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* 
work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, 
lddb, info); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } template<> void magmaLuSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(hipGetLastError()); } #endif #define ALLOCATE_ARRAY(name, type, size) \ auto storage_##name = pin_memory<type>(size); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); if (b.dim() == 2) { auto ipiv = at::empty({n}, at::kInt); magma_int_t info = 0; magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(), b_data, n, &info); infos[0] = info; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaSolveBatched<scalar_t>( n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to 
batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaSolveBatched<scalar_t>( n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); std::vector<int64_t> infos(batchCount(self), 0); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{ apply_solve<scalar_t>(self_working_copy, A_working_copy, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "solve_cuda"); } else { singleCheckErrors(infos[0], "solve_cuda"); } return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(self.get_device()); magmaLuBatched<scalar_t>( n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; scalar_t** self_inv_array_cur = &self_inv_array[mini_idx]; magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx]; magma_int_t* info_array_cur = &info_array[mini_idx]; magmaGetriBatched<scalar_t>( n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if 
(batch_size % batch_limit != 0) { magmaGetriBatched<scalar_t>( n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } template <typename scalar_t> static void apply_single_inverse(Tensor& self, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n); magma_int_t info_tmp = 0; Tensor ipiv = at::empty({n}, at::kInt); Tensor dwork = at::empty({lwork}, self.options()); magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp); if (info_tmp != 0) { info = info_tmp; return; } magmaGetri<scalar_t>( n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp); info = info_tmp; #endif } Tensor _inverse_helper_cuda(const Tensor& self) { auto self_inv_working_copy = cloneBatchedColumnMajor(self); if (self.dim() > 2) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse<scalar_t>(self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " 
"compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n, b_data, n, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, n, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { 
int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, n, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); constexpr int64_t batch_limit = 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = 
&info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = ::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. 
// The data is later copied back to the appropriate output tensor. Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = ::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if 
(self.numel() == 0) { self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); if (self.dim() > 2 && pivot && m == n && m <= 32) { /* The magma implementation of small singular square batch matrices has a bug that results nan values in the LU factorization results, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on TODO: disable this block for magma versions that implement a bug fix */ auto batch_size = infos_tensor.numel(); auto infos_array = infos_tensor.view({batch_size}); auto infos_cpu = infos_array.to(at::kCPU); auto infos_data = infos_cpu.data_ptr<int>(); auto input_array = self.view({batch_size, m, n}); auto working_array = self_working_copy.view({batch_size, m, n}); auto pivots_array = pivots_tensor.view({batch_size, k}); for (int64_t i = 0; i < batch_size; i++) { auto info = infos_data[i]; if (info > 0) { /* We'll recompute LU factorization of singular matrices using the non-batch implementation to workaround the magma bug (magma issue 13). */ working_array[i].copy_(input_array[i]); auto matrix = working_array[i]; auto pivots = pivots_array[i]; auto infos = infos_array[i]; apply_lu<scalar_t>(matrix, pivots, infos, pivot); } } } }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); // batch_size == 1 implies that: // 1. the RHS and LHS tensors have 2 dimensions, or // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1 if (batch_size == 1) { magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n); } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, n, b_array_cur, n, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, 
diag, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper, bool transpose, bool unitriangular) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular); }); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto q_data = Q.data_ptr<scalar_t>(); auto r_data = R.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); auto r_matrix_stride = matrixStride(R); magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)"); magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)"); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. 
Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) { std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { // Fix the number of columns of q_working_copy appropriately q_sizes[self.dim() - 1] = n_columns_q; q_working_copy = at::eye(q_sizes[self.dim() - 2], 
q_sizes[self.dim() - 1], self.options()); q_working_copy = q_working_copy.expand_as(q_working_copy); // We repurpose the same q_sizes for r_working_copy // Fix the number of rows and columns of q_working_copy appropriately q_sizes[self.dim() - 1] = self.size(-1); q_sizes[self.dim() - 2] = n_columns_q; r_working_copy = at::empty(q_sizes, self.options()); return std::make_tuple(q_working_copy, r_working_copy); } q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); r_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{ apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q), r_working_copy.narrow(-2, 0, n_columns_q).triu()); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. 
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info);

scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(wkopt, "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);

for (int64_t i = 0; i < batch_size; i++) {
  scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
  scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
  magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
                        wA, n, work, lwork, iwork, liwork, &info);
  infos[i] = info;
  if (info != 0) {
    return;
  }
}
#endif
}

// Entry point for torch.symeig on CUDA.
// Returns (eigenvalues, eigenvectors-or-empty) for a batch of symmetric
// matrices. MAGMA's syevd driver is a hybrid CPU-GPU algorithm and wants the
// eigenvalue buffer on the CPU, so it is allocated there and moved back to
// self's device at the end.
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);

  // Eigenvalue shape = batch dims + (n,): drop the last matrix dimension.
  auto eigvals_shape = self.sizes().vec();
  eigvals_shape.pop_back();

  // Empty input short-circuits before any MAGMA call; the eigenvalue tensor
  // then stays on the CUDA device (there is nothing to copy back).
  if (self.numel() == 0) {
    auto eigvals = at::empty(eigvals_shape, self.options());
    return std::tuple<Tensor, Tensor>(
        eigvals, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
  }

  auto eigvals = at::empty(eigvals_shape, self.options().device(at::kCPU));
  auto matrices = cloneBatchedColumnMajor(self);

  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{
    apply_symeig<scalar_t>(matrices, eigvals, eigenvectors, upper, infos);
  });

  if (self.dim() > 2) {
    batchCheckErrors(infos, "symeig_cuda");
  } else {
    singleCheckErrors(infos[0], "symeig_cuda");
  }

  if (eigenvectors) {
    return std::tuple<Tensor, Tensor>(eigvals.to(self.device()), matrices);
  }
  return std::tuple<Tensor, Tensor>(eigvals.to(self.device()),
                                    at::empty({0}, self.options()));
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<scalar_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);

magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = ::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);

for (int64_t i = 0; i < batchsize; i++) {
  scalar_t* self_working_ptr = &self_data[i * self_stride];
  scalar_t* S_working_ptr = &S_data[i * S_stride];
  scalar_t* U_working_ptr = &U_data[i * U_stride];
  scalar_t* VT_working_ptr = &VT_data[i * VT_stride];

  // Compute S, U (optionally), VT (optionally)
  magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
                     S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
  infos[i] = info;
  if (info != 0) {
    return;
  }
}
#endif
}

// Entry point for torch.svd on CUDA.
// Returns (U, S, VT); `some` selects the thin factorization, `compute_uv`
// toggles computation of the singular vectors. Fix relative to the original:
// `::min(m, n)` had a garbled qualifier (there is no global `min` here) —
// it is `std::min`.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = std::min(m, n);  // rank bound; also the column count kept for the thin VT
  // MAGMA job character: 'A' = all vectors, 'S' = thin vectors, 'N' = values only.
  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);

  if (self.numel() > 0) {
    // The input matrix, U, S and VT have to reside in pinned memory, and the
    // input and U must be column major. _create_U_S_VT handles U/S/VT; the
    // input is handled here by building column-major strides explicitly.
    auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
    self_col_major_strides[self.dim() - 2] = 1;
    self_col_major_strides[self.dim() - 1] = m;
    // Create strided tensor in pinned memory
    auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                               at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
    self_working_copy.copy_(self);

    AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
      apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy,
                          VT_working_copy, jobchar, infos);
    });

    if (self.dim() > 2) {
      batchCheckErrors(infos, "svd_cuda");
    } else {
      singleCheckErrors(infos[0], "svd_cuda");
    }

    // Move the results back onto self's device, preserving strides.
    U_working_copy = same_stride_to(U_working_copy, self.options());
    S_working_copy = same_stride_to(S_working_copy, self.options());
    VT_working_copy = same_stride_to(VT_working_copy, self.options());

    if (compute_uv) {
      if (some) {
        VT_working_copy = VT_working_copy.narrow(-1, 0, k);
      }
    } else {
      VT_working_copy.zero_();
      U_working_copy.zero_();
    }
  } else {
    // Empty input: zeroed U/VT and an (empty-shaped) S on the right device.
    U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
    S_working_copy = same_stride_to(S_working_copy, self.options());
    VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
  }
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;

if (b.dim() == 2) {
  Tensor pivots_tmp = pivots.cpu();
  magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
  info = info_tmp;
} else {
  auto pivots_data = pivots.data_ptr<magma_int_t>();
  auto b_stride = matrixStride(b);
  auto lu_stride = matrixStride(lu);
  auto pivots_stride = pivots.size(-1);
  magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");

  magma_int_t** pivots_array;
  scalar_t** lu_array;
  scalar_t** b_array;

  ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

  for (int64_t i = 0; i < batch_size; i++) {
    pivots_array[i] = &pivots_data[i * pivots_stride];
    b_array[i] = &b_data[i * b_stride];
    lu_array[i] = &lu_data[i * lu_stride];
  }

  MAGMAQueue magma_queue(b.get_device());

  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** lu_array_cur = &lu_array[mini_idx];
    scalar_t** b_array_cur = &b_array[mini_idx];
    magma_int_t** pivots_array_cur = &pivots_array[mini_idx];

    magmaLuSolveBatched<scalar_t>(
        n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
        info_tmp, batch_limit, magma_queue);

    if (info_tmp != 0) {
      break;
    }
  }

  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0 && info_tmp == 0) {
    magmaLuSolveBatched<scalar_t>(
        n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx],
&b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
e84d41675fe6b7cb373034017029b8e5f137e882.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <THC/THC.h> // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLu( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info); template<class scalar_t> void magmaLuBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaLuNoPiv( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaLuNoPivBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n); template<class scalar_t> void magmaGetri( magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork, magma_int_t lwork, magma_int_t* info); template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** 
ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholeskySolve( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaCholeskySolveBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaCholesky( magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* info); template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> void magmaTriangularSolve( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb); template<class scalar_t> void magmaTriangularSolveBatched( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<class scalar_t> inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n); template<class scalar_t> void magmaGeqrf( magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2); template<class scalar_t> void magmaOrgqr( magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA, magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info); template<class scalar_t> void magmaSymeig( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda, scalar_t* w, 
scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info); template<class scalar_t> void magmaSvd( magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A, magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu, scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info); template<class scalar_t> void magmaLuSolve( magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info); template<class scalar_t> void magmaLuSolveBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue); template<> void magmaSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSolveBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, 
magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLu<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPiv<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<double>( magma_int_t m, magma_int_t n, 
double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuNoPivBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) { return magma_get_dgetri_nb(n); } template<> inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) { return magma_get_sgetri_nb(n); } template<> void magmaGetri<double>( magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetri<float>( magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork, magma_int_t lwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& 
magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolve<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskySolveBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<double>( magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dpotrf_gpu(uplo, n, dA, ldda, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholesky<float>( magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_spotrf_gpu(uplo, n, dA, ldda, 
info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolve<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) { MagmaStreamSyncGuard guard; magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolve<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) { MagmaStreamSyncGuard guard; magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<double>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaTriangularSolveBatched<float>( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, float** 
dB_array, magma_int_t lddb, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); AT_CUDA_CHECK(cudaGetLastError()); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) { return magma_get_dgeqrf_nb(m, n); } template<> inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) { return magma_get_sgeqrf_nb(m, n); } template<> void magmaGeqrf<double>( magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaGeqrf<float>( magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t* info, bool is_v2) { MagmaStreamSyncGuard guard; if (!is_v2) { magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info); } else { magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info); } AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaOrgqr<double>( magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda, double* tau, double* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaOrgqr<float>( magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda, float* tau, float* dT, magma_int_t nb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<double>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda, double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, magma_int_t* iwork, 
magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSymeig<float>( magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda, float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<double>( magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A, magma_int_t lda, double* s, double* U, magma_int_t ldu, double* VT, magma_int_t ldvt, double* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaSvd<float>( magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A, magma_int_t lda, float* s, float* U, magma_int_t ldu, float* VT, magma_int_t ldvt, float* work, magma_int_t lwork, magma_int_t* iwork, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<double>( magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); AT_CUDA_CHECK(cudaGetLastError()); } template<> void magmaLuSolve<float>( magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) { MagmaStreamSyncGuard guard; magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info); 
AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif

// Allocates `size` elements of `type` in pinned host memory; the owning
// storage lives in a local so the buffer is released at scope exit.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Solves A x = b in place (solution lands in b). 2-D inputs use the single
// MAGMA driver; batched inputs use the batched driver in chunks of at most
// 65535 matrices (MAGMA's batch limit). Per-matrix status codes go to infos.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in " "compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

if (b.dim() == 2) {
  auto ipiv = at::empty({n}, at::kInt);
  magma_int_t info = 0;
  magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(), b_data, n, &info);
  infos[0] = info;
} else {
  auto A_mat_stride = matrixStride(A);
  auto b_mat_stride = matrixStride(b);
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** A_array;
  scalar_t** b_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

  // Per-matrix pointers into the batched storage.
  for (int64_t i = 0; i < batch_size; i++) {
    A_array[i] = &A_data[i * A_mat_stride];
    b_array[i] = &b_data[i * b_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(b.get_device());

  constexpr int64_t batch_limit = 65535;
  // floor(batch_size / batch_limit) full mini-batches ...
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** A_array_cur = &A_array[mini_idx];
    scalar_t** b_array_cur = &b_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur = &info_array[mini_idx];

    magmaSolveBatched<scalar_t>(
        n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
        info_array_cur, batch_limit, magma_queue);
  }

  // ... plus one remainder batch of batch_size % batch_limit matrices.
  if (batch_size % batch_limit != 0) {
    magmaSolveBatched<scalar_t>(
        n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
        &info_array[mini_idx], batch_size % batch_limit, magma_queue);
  }

  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
}
#endif
}

// Entry point for torch.solve on CUDA: clones inputs to column-major,
// dispatches apply_solve, and checks the collected status codes.
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  std::vector<int64_t> infos(batchCount(self), 0);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos[0], "solve_cuda");
  }
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// Batched matrix inverse: LU-factorize all matrices, then run the batched
// out-of-place getri in chunks of at most 65535 matrices.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in " "compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;

ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

// Per-matrix pointers into the batched storage.
for (int64_t i = 0; i < batch_size; i++) {
  self_array[i] = &self_data[i * self_mat_stride];
  self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
  ipiv_array[i] = &ipiv_data[i * n];
}

MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
    n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue);

constexpr int64_t batch_limit = 65535;
// floor(batch_size / batch_limit) full mini-batches ...
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
  scalar_t** self_array_cur = &self_array[mini_idx];
  scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
  magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
  magma_int_t* info_array_cur = &info_array[mini_idx];

  magmaGetriBatched<scalar_t>(
      n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
      n, info_array_cur, batch_limit, magma_queue);
}

// ... plus one remainder batch of batch_size % batch_limit matrices.
if
(batch_size % batch_limit != 0) { magmaGetriBatched<scalar_t>( n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } template <typename scalar_t> static void apply_single_inverse(Tensor& self, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n); magma_int_t info_tmp = 0; Tensor ipiv = at::empty({n}, at::kInt); Tensor dwork = at::empty({lwork}, self.options()); magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp); if (info_tmp != 0) { info = info_tmp; return; } magmaGetri<scalar_t>( n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp); info = info_tmp; #endif } Tensor _inverse_helper_cuda(const Tensor& self) { auto self_inv_working_copy = cloneBatchedColumnMajor(self); if (self.dim() > 2) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse<scalar_t>(self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("cholesky_solve: MAGMA library not found in " 
"compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); int info_tmp = 0; if (b.dim() == 2) { magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n, b_data, n, &info_tmp); info = info_tmp; } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, A_array_cur, n, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaCholeskySolveBatched<scalar_t>( uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { 
int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{ apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info); }); TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); if (self.dim() == 2) { magma_int_t info = 0; magmaCholesky<scalar_t>(uplo, n, self_data, n, &info); infos[0] = info; } else { auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); constexpr int64_t batch_limit = 262140; // Compute as many batches of 262140 possible // 262140 is the size of the largest batch of matrices that can be run with // violating maximum kernel configuration // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** self_array_cur = &self_array[mini_idx]; magma_int_t* info_array_cur = 
&info_array[mini_idx]; magmaCholeskyBatched<scalar_t>( uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaCholeskyBatched<scalar_t>( uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue); } for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "cholesky_cuda"); } else { singleCheckErrors(infos[0], "cholesky_cuda"); } if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) { #ifndef USE_MAGMA AT_ERROR("lu: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_int_t k = std::min(m, n); if (self.dim() == 2) { // If `pivots` is defined, then we have to compute them. // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute // the partially-pivoted LU decomposition with / without pivots. // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots. 
// The data is later copied back to the appropriate output tensor. Tensor info_tmp = at::zeros({}, at::kInt); if (get_pivots) { Tensor piv_tmp = at::empty({k}, at::kInt); magmaLu<scalar_t>( m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>()); pivots.copy_(piv_tmp); } else { magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>()); } infos.copy_(info_tmp); } else { auto self_matrix_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); scalar_t** self_array; ALLOCATE_ARRAY(self_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_matrix_stride]; } MAGMAQueue magma_queue(self.get_device()); // Same comment as in the case of single matrix above. if (get_pivots) { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto pivots_matrix_stride = pivots.size(-1); magma_int_t** pivots_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_matrix_stride]; } magmaLuBatched<scalar_t>( m, n, self_array, m, pivots_array, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } else { magmaLuNoPivBatched<scalar_t>( m, n, self_array, m, infos.data_ptr<magma_int_t>(), batch_size, magma_queue); } } #endif } std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) { TORCH_CHECK(self.dim() >= 2, "expected tensor with 2 or more dimensions, got size: ", self.sizes(), " instead"); auto m = self.size(-2); auto n = self.size(-1); auto k = std::min(m, n); auto req_size = self.sizes().vec(); req_size.pop_back(); req_size.back() = k; Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous(); req_size.pop_back(); auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt)); Tensor self_working_copy; if 
(self.numel() == 0) { self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else { self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{ apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot); if (self.dim() > 2 && pivot && m == n && m <= 32) { /* The magma implementation of small singular square batch matrices has a bug that results nan values in the LU factorization results, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on TODO: disable this block for magma versions that implement a bug fix */ auto batch_size = infos_tensor.numel(); auto infos_array = infos_tensor.view({batch_size}); auto infos_cpu = infos_array.to(at::kCPU); auto infos_data = infos_cpu.data_ptr<int>(); auto input_array = self.view({batch_size, m, n}); auto working_array = self_working_copy.view({batch_size, m, n}); auto pivots_array = pivots_tensor.view({batch_size, k}); for (int64_t i = 0; i < batch_size; i++) { auto info = infos_data[i]; if (info > 0) { /* We'll recompute LU factorization of singular matrices using the non-batch implementation to workaround the magma bug (magma issue 13). */ working_array[i].copy_(input_array[i]); auto matrix = working_array[i]; auto pivots = pivots_array[i]; auto infos = infos_array[i]; apply_lu<scalar_t>(matrix, pivots, infos, pivot); } } } }); } if (check_errors) { if (self.dim() == 2) { singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true); } else { batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true); } } return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) { #ifndef USE_MAGMA AT_ERROR("triangular_solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans; magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit; auto A_data = A.data_ptr<scalar_t>(); auto b_data = b.data_ptr<scalar_t>(); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); // batch_size == 1 implies that: // 1. the RHS and LHS tensors have 2 dimensions, or // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1 if (batch_size == 1) { magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n); } else { auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(A_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** A_array_cur = &A_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magmaTriangularSolveBatched<scalar_t>( uplo, trans, diag, n, nrhs, A_array_cur, n, b_array_cur, n, batch_limit, magma_queue); } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0) { magmaTriangularSolveBatched<scalar_t>( uplo, trans, 
diag, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue); } } #endif } std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper, bool transpose, bool unitriangular) { auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{ apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular); }); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("qr: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto q_data = Q.data_ptr<scalar_t>(); auto r_data = R.data_ptr<scalar_t>(); auto q_matrix_stride = matrixStride(Q); auto r_matrix_stride = matrixStride(R); magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)"); magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)"); magma_int_t k = m < n ? m : n; magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n); int64_t batch_size = batchCount(R); // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors. // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors. 
Tensor tau = at::empty({k}, Q.options().device(at::kCPU)); Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options()); scalar_t* tau_data = tau.data_ptr<scalar_t>(); scalar_t* work_data = work.data_ptr<scalar_t>(); // This phase computes R (the raw version) // This uses MAGMA's ?geqrf2_gpu function magma_int_t info = 0; for (int64_t i = 0; i < batch_size; i++) { scalar_t* r_working_ptr = &r_data[i * r_matrix_stride]; magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true); infos[i] = info; if (info != 0) { return; } } // This phase computes Q (the raw version) // We require to perform ?geqrf_gpu again due to this bug in MAGMA: // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly. // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu // Refer to the below link for more details: // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 for (int64_t i = 0; i < batch_size; i++) { scalar_t* q_working_ptr = &q_data[i * q_matrix_stride]; magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false); infos[i] = info; if (info != 0) { return; } magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) { std::vector<int64_t> infos(batchCount(self), 0); // Setup input geometry and inputs for apply_qr std::vector<int64_t> q_sizes, q_strides; int64_t n_columns_q; std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some); Tensor q_working_copy, r_working_copy; // If there are no elements, then we simply return a pair of tensors of required dimensions if (self.numel() == 0) { // Fix the number of columns of q_working_copy appropriately q_sizes[self.dim() - 1] = n_columns_q; q_working_copy = at::eye(q_sizes[self.dim() - 2], 
q_sizes[self.dim() - 1], self.options()); q_working_copy = q_working_copy.expand_as(q_working_copy); // We repurpose the same q_sizes for r_working_copy // Fix the number of rows and columns of q_working_copy appropriately q_sizes[self.dim() - 1] = self.size(-1); q_sizes[self.dim() - 2] = n_columns_q; r_working_copy = at::empty(q_sizes, self.options()); return std::make_tuple(q_working_copy, r_working_copy); } q_working_copy = at::empty_strided(q_sizes, q_strides, self.options()); q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self); r_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{ apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "qr_cuda"); } else { singleCheckErrors(infos[0], "qr_cuda"); } return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q), r_working_copy.narrow(-2, 0, n_columns_q).triu()); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("symeig: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto eigvals_data = eigvals.data_ptr<scalar_t>(); auto self_matrix_stride = matrixStride(self); auto eigvals_stride = eigvals.size(-1); int64_t batch_size = batchCount(self); magma_int_t n = magma_int_cast(self.size(-1), "n"); magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec; scalar_t* wA; ALLOCATE_ARRAY(wA, scalar_t, n * n); magma_int_t info; // Run once, first to get the optimum work sizes. 
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t liwork = -1; magma_int_t iwkopt; magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info); scalar_t* work; magma_int_t* iwork; lwork = magma_int_cast(wkopt, "work_size"); liwork = magma_int_cast(iwkopt, "iwork_size"); ALLOCATE_ARRAY(work, scalar_t, lwork); ALLOCATE_ARRAY(iwork, magma_int_t, liwork); for (int64_t i = 0; i < batch_size; i++) { scalar_t* self_working_ptr = &self_data[i * self_matrix_stride]; scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride]; magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr, wA, n, work, lwork, iwork, liwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); auto self_sizes = self.sizes().vec(); self_sizes.pop_back(); // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors. // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues. // The data is later moved to the appropriate device. // In the case where self.numel() == 0, we just return an empty tensor of // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)") auto eigvals_working_copy = self.numel() == 0 ? 
at::empty(self_sizes, self.options()) : at::empty(self_sizes, self.options().device(at::kCPU)); if (self.numel() == 0) { return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT)); } auto self_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{ apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "symeig_cuda"); } else { singleCheckErrors(infos[0], "symeig_cuda"); } if (eigenvectors) { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy); } else { return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options())); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<typename scalar_t> static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, char jobchar, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("svd: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else auto self_data = self.data_ptr<scalar_t>(); auto U_data = U.data_ptr<scalar_t>(); auto S_data = S.data_ptr<scalar_t>(); auto VT_data = VT.data_ptr<scalar_t>(); auto self_stride = matrixStride(self); auto U_stride = matrixStride(U); auto S_stride = S.size(-1); auto VT_stride = matrixStride(VT); auto batchsize = batchCount(self); magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec); magma_int_t m = magma_int_cast(self.size(-2), "m"); magma_int_t n = magma_int_cast(self.size(-1), "n"); auto k = std::min(m, n); magma_int_t info = 0; // Run once, first to get the optimum work size. 
// Since we deal with batches of matrices with the same dimensions, doing this outside // the loop saves (batch_size - 1) workspace queries which would provide the same result // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty() magma_int_t lwork = -1; scalar_t wkopt; magma_int_t* iwork; ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k); magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info); lwork = magma_int_cast(wkopt, "work_size"); scalar_t* work; ALLOCATE_ARRAY(work, scalar_t, lwork); for (int64_t i = 0; i < batchsize; i++) { scalar_t* self_working_ptr = &self_data[i * self_stride]; scalar_t* S_working_ptr = &S_data[i * S_stride]; scalar_t* U_working_ptr = &U_data[i * U_stride]; scalar_t* VT_working_ptr = &VT_data[i * VT_stride]; // Compute S, U (optionally), VT (optionally) magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m, S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info); infos[i] = info; if (info != 0) { return; } } #endif } std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) { std::vector<int64_t> infos(batchCount(self), 0); int64_t m = self.size(-2), n = self.size(-1); int64_t k = std::min(m, n); char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N'; Tensor U_working_copy, S_working_copy, VT_working_copy; std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv); if (self.numel() > 0) { // The input matrix, U, S and VT have to reside in pinned memory. // Additionally, the input and U have to be in column major format. // _create_U_S_VT takes care of a part of these requirements (for U, S and VT) // For the input matrix, this requirements are being taken care of below. 
// Specify strides auto self_col_major_strides = at::detail::defaultStrides(self.sizes()); self_col_major_strides[self.dim() - 2] = 1; self_col_major_strides[self.dim() - 1] = m; // Create strided tensor in pinned memory auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides, at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true)); self_working_copy.copy_(self); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{ apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos); }); if (self.dim() > 2) { batchCheckErrors(infos, "svd_cuda"); } else { singleCheckErrors(infos[0], "svd_cuda"); } U_working_copy = same_stride_to(U_working_copy, self.options()); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()); if (compute_uv) { if (some) { VT_working_copy = VT_working_copy.narrow(-1, 0, k); } } else { VT_working_copy.zero_(); U_working_copy.zero_(); } } else { U_working_copy = same_stride_to(U_working_copy, self.options()).zero_(); S_working_copy = same_stride_to(S_working_copy, self.options()); VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_(); } return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("lu_solve: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto b_data = b.data_ptr<scalar_t>(); auto lu_data = lu.data_ptr<scalar_t>(); auto n = lu.size(-2); auto nrhs = b.size(-1); int info_tmp = 0; if (b.dim() == 2) { Tensor pivots_tmp = pivots.cpu(); magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp); info = info_tmp; } else { auto pivots_data = pivots.data_ptr<magma_int_t>(); auto b_stride = matrixStride(b); auto lu_stride = matrixStride(lu); auto pivots_stride = pivots.size(-1); magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount"); magma_int_t** pivots_array; scalar_t** lu_array; scalar_t** b_array; ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size); ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size); for (int64_t i = 0; i < batch_size; i++) { pivots_array[i] = &pivots_data[i * pivots_stride]; b_array[i] = &b_data[i * b_stride]; lu_array[i] = &lu_data[i * lu_stride]; } MAGMAQueue magma_queue(b.get_device()); constexpr int64_t batch_limit = 65535; // Compute as many batches of 65535 possible // The number of "mini"-batches are floor(batch_size / batch_limit) // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves int64_t mini_batches = batch_size / batch_limit, mini_idx; for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) { scalar_t** lu_array_cur = &lu_array[mini_idx]; scalar_t** b_array_cur = &b_array[mini_idx]; magma_int_t** pivots_array_cur = &pivots_array[mini_idx]; magmaLuSolveBatched<scalar_t>( n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n, info_tmp, batch_limit, magma_queue); if (info_tmp != 0) { break; } } // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit // which concisely is equal to batch_size % batch_limit if (batch_size % batch_limit != 0 && info_tmp == 0) { magmaLuSolveBatched<scalar_t>( n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], 
&b_array[mini_idx], n, info_tmp, batch_size % batch_limit, magma_queue); } info = info_tmp; } #endif } Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data); auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous(); if (self.numel() == 0 || LU_data.numel() == 0) { return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{ apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info); }); TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info); return self_working_copy; } }} // namespace at::native #undef ALLOCATE_ARRAY
466c75c07782850abaef0d2d44273fac5af319f5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by search_multi_cta_00_generate.py * * Make changes there and run in this directory: * * > python search_multi_cta_00_generate.py * */ #include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh> namespace raft::neighbors::cagra::detail::multi_cta_search { #define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \ template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \ raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \ raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \ INDEX_T* const topk_indices_ptr, \ DISTANCE_T* const topk_distances_ptr, \ const DATA_T* const queries_ptr, \ const uint32_t num_queries, \ const INDEX_T* dev_seed_ptr, \ uint32_t* const num_executed_iterations, \ uint32_t topk, \ uint32_t block_size, \ uint32_t result_buffer_size, \ uint32_t smem_size, \ int64_t hash_bitlen, \ INDEX_T* hashmap_ptr, \ uint32_t num_cta_per_query, \ uint32_t num_random_samplings, \ uint64_t rand_xor_mask, \ uint32_t num_seeds, \ size_t itopk_size, \ size_t search_width, \ size_t min_iterations, \ size_t max_iterations, \ hipStream_t stream); instantiate_kernel_selection(32, 1024, int8_t, uint32_t, float); #undef instantiate_kernel_selection } // namespace 
raft::neighbors::cagra::detail::multi_cta_search
466c75c07782850abaef0d2d44273fac5af319f5.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by search_multi_cta_00_generate.py * * Make changes there and run in this directory: * * > python search_multi_cta_00_generate.py * */ #include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh> namespace raft::neighbors::cagra::detail::multi_cta_search { #define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \ template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \ raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \ raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \ INDEX_T* const topk_indices_ptr, \ DISTANCE_T* const topk_distances_ptr, \ const DATA_T* const queries_ptr, \ const uint32_t num_queries, \ const INDEX_T* dev_seed_ptr, \ uint32_t* const num_executed_iterations, \ uint32_t topk, \ uint32_t block_size, \ uint32_t result_buffer_size, \ uint32_t smem_size, \ int64_t hash_bitlen, \ INDEX_T* hashmap_ptr, \ uint32_t num_cta_per_query, \ uint32_t num_random_samplings, \ uint64_t rand_xor_mask, \ uint32_t num_seeds, \ size_t itopk_size, \ size_t search_width, \ size_t min_iterations, \ size_t max_iterations, \ cudaStream_t stream); instantiate_kernel_selection(32, 1024, int8_t, uint32_t, float); #undef instantiate_kernel_selection } // namespace raft::neighbors::cagra::detail::multi_cta_search
67ea0375a91553c167b7d6b7f77c68026b06178f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright: See LICENSE file that comes with this distribution * */ #include "kernel_t.h" __global__ void global_function (const int i, int * j) { *j = i; } kernelT get_kernel() { return global_function; }
67ea0375a91553c167b7d6b7f77c68026b06178f.cu
/* * Copyright: See LICENSE file that comes with this distribution * */ #include "kernel_t.h" __global__ void global_function (const int i, int * j) { *j = i; } kernelT get_kernel() { return global_function; }
b08a319290302c25b4eecd29de8edefd0ac0bfd9.hip
// !!! This is a file automatically generated by hipify!!! /* Impulse based Rigid body simulation using CUDA Copyright (c) 2007 Takahiro Harada http://www.iii.u-tokyo.ac.jp/~takahiroharada/projects/impulseCUDA.html This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include <cstdlib> #include <cstdio> #include <string.h> #include "cutil_math.h" #include "math_constants.h" #include <hip/hip_vector_types.h> #include "btCudaDefines.h" #include "../../src/BulletMultiThreaded/btGpuUtilsSharedDefs.h" #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedTypes.h" #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedDefs.h" texture<float4, 1, hipReadModeElementType> posTex; #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedCode.h"
b08a319290302c25b4eecd29de8edefd0ac0bfd9.cu
/* Impulse based Rigid body simulation using CUDA Copyright (c) 2007 Takahiro Harada http://www.iii.u-tokyo.ac.jp/~takahiroharada/projects/impulseCUDA.html This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include <cstdlib> #include <cstdio> #include <string.h> #include "cutil_math.h" #include "math_constants.h" #include <vector_types.h> #include "btCudaDefines.h" #include "../../src/BulletMultiThreaded/btGpuUtilsSharedDefs.h" #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedTypes.h" #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedDefs.h" texture<float4, 1, cudaReadModeElementType> posTex; #include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedCode.h"
f7fad914fb0a408e88c6d8df66ef84ef60cb0c87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void count_spikes(const double *Params, const int *id, int *nsp){ int tid, tind, bid, ind, Nspikes, Nfilters, Nthreads, Nblocks; Nspikes = (int) Params[0]; Nfilters = (int) Params[2]; tid = threadIdx.x; bid = blockIdx.x; Nthreads = blockDim.x; Nblocks = gridDim.x; tind = tid + Nthreads *bid; while (tind<Nfilters){ for(ind=0; ind<Nspikes;ind++) if (id[ind]==tind) nsp[tind] += 1; tind += Nthreads * Nblocks; } }
f7fad914fb0a408e88c6d8df66ef84ef60cb0c87.cu
#include "includes.h" __global__ void count_spikes(const double *Params, const int *id, int *nsp){ int tid, tind, bid, ind, Nspikes, Nfilters, Nthreads, Nblocks; Nspikes = (int) Params[0]; Nfilters = (int) Params[2]; tid = threadIdx.x; bid = blockIdx.x; Nthreads = blockDim.x; Nblocks = gridDim.x; tind = tid + Nthreads *bid; while (tind<Nfilters){ for(ind=0; ind<Nspikes;ind++) if (id[ind]==tind) nsp[tind] += 1; tind += Nthreads * Nblocks; } }
ef737eae4cbeefea613aa0992a5bfd2bb3bd7dfc.hip
// !!! This is a file automatically generated by hipify!!! #include "rsbench_hip.cuh" int main(int argc, char * argv[]) { // ===================================================================== // Initialization & Command Line Read-In // ===================================================================== int version = 12; double start, stop; // Process CLI Fields Input input = read_CLI( argc, argv ); // ===================================================================== // Print-out of Input Summary // ===================================================================== logo(version); center_print("INPUT SUMMARY", 79); border_print(); print_input_summary(input); // ===================================================================== // Intialize Simulation Data Structures // ===================================================================== border_print(); center_print("INITIALIZATION", 79); border_print(); start = get_time(); SimulationData SD = initialize_simulation( input ); SimulationData GSD = move_simulation_data_to_device( input, SD ); hipDeviceSetLimit(hipLimitMallocHeapSize, 1*1024*1024*1024); stop = get_time(); printf("Initialization Complete. (%.2lf seconds)\n", stop-start); // ===================================================================== // Cross Section (XS) Parallel Lookup Simulation Begins // ===================================================================== border_print(); center_print("SIMULATION", 79); border_print(); unsigned long vhash = 0; // Run Simulation start = get_time(); // Run simulation if( input.simulation_method == EVENT_BASED ) { if( input.kernel_id == 0 ) run_event_based_simulation(input, GSD, SD, &vhash ); else if( input.kernel_id == 1 ) run_event_based_simulation_optimization_1(input, GSD, &vhash ); else { printf("Error: No kernel ID %d found!\n", input.kernel_id); exit(1); } } else if( input.simulation_method == HISTORY_BASED ) { printf("History-based simulation not implemented in OpenMP offload code. 
Instead,\nuse the event-based method with \"-m event\" argument.\n"); exit(1); } stop = get_time(); // Final hash step vhash = vhash % 999983; printf("Simulation Complete.\n"); // ===================================================================== // Print / Save Results and Exit // ===================================================================== border_print(); center_print("RESULTS", 79); border_print(); int is_invalid = validate_and_print_results(input, stop-start, vhash); border_print(); return is_invalid; }
ef737eae4cbeefea613aa0992a5bfd2bb3bd7dfc.cu
#include "rsbench.cuh" int main(int argc, char * argv[]) { // ===================================================================== // Initialization & Command Line Read-In // ===================================================================== int version = 12; double start, stop; // Process CLI Fields Input input = read_CLI( argc, argv ); // ===================================================================== // Print-out of Input Summary // ===================================================================== logo(version); center_print("INPUT SUMMARY", 79); border_print(); print_input_summary(input); // ===================================================================== // Intialize Simulation Data Structures // ===================================================================== border_print(); center_print("INITIALIZATION", 79); border_print(); start = get_time(); SimulationData SD = initialize_simulation( input ); SimulationData GSD = move_simulation_data_to_device( input, SD ); cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1*1024*1024*1024); stop = get_time(); printf("Initialization Complete. (%.2lf seconds)\n", stop-start); // ===================================================================== // Cross Section (XS) Parallel Lookup Simulation Begins // ===================================================================== border_print(); center_print("SIMULATION", 79); border_print(); unsigned long vhash = 0; // Run Simulation start = get_time(); // Run simulation if( input.simulation_method == EVENT_BASED ) { if( input.kernel_id == 0 ) run_event_based_simulation(input, GSD, SD, &vhash ); else if( input.kernel_id == 1 ) run_event_based_simulation_optimization_1(input, GSD, &vhash ); else { printf("Error: No kernel ID %d found!\n", input.kernel_id); exit(1); } } else if( input.simulation_method == HISTORY_BASED ) { printf("History-based simulation not implemented in OpenMP offload code. 
Instead,\nuse the event-based method with \"-m event\" argument.\n"); exit(1); } stop = get_time(); // Final hash step vhash = vhash % 999983; printf("Simulation Complete.\n"); // ===================================================================== // Print / Save Results and Exit // ===================================================================== border_print(); center_print("RESULTS", 79); border_print(); int is_invalid = validate_and_print_results(input, stop-start, vhash); border_print(); return is_invalid; }
524cd01f80c7f55cc174cf8d2e9711d778d8ba68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. 
// // batch_normalization.cu - GPU helper routines for batch normalization layer //////////////////////////////////////////////////////////////////////////////// #include "math.h" #include <iostream> #include "lbann/layers/regularizers/batch_normalization_cuda.hpp" #include "lbann/utils/exception.hpp" // Macros to check CUDA calls #define FORCE_CHECK_CUDA(cuda_call) \ do { \ const hipError_t status = cuda_call; \ if (status != hipSuccess) { \ std::cerr << "CUDA error: " << hipGetErrorString(status) << "\n"; \ std::cerr << "Error at " << __FILE__ << ":" << __LINE__ << "\n"; \ hipDeviceReset(); \ throw lbann::lbann_exception("CUDA error"); \ } \ } while (0) #ifdef LBANN_DEBUG #define CHECK_CUDA(cuda_call) FORCE_CHECK_CUDA(cuda_call) #else #define CHECK_CUDA(cuda_call) cuda_call #endif // #ifdef LBANN_DEBUG // Atomic add functions #if __CUDA_ARCH__ >= 530 __device__ inline __half atomic_add(__half* address, __half val) { #if 0 // TODO: replace this once Nvidia implements atomicAdd for __half return atomicAdd(address, val); #else unsigned int* address_as_uint = (unsigned int*) address; unsigned int old = *address_as_uint; __half* old_as_half = (__half*) &old; unsigned int assumed; unsigned int updated; __half* updated_as_half = (__half*) &updated; do { assumed = old; updated = old; *updated_as_half += value; old = atomicCAS(address_as_uint, assumed, updated); } while (assumed != old); return *old_as_half; #endif // 0 } #endif // __CUDA_ARCH__ >= 530 __device__ inline float atomic_add(float* address, float val) { return atomicAdd(address, val); } __device__ inline double atomic_add(double* address, double val) { #if __CUDA_ARCH__ >= 600 return atomicAdd(address, val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return 
__longlong_as_double(old); #endif // __CUDA_ARCH__ < 600 } // Reciprocal square root functions #if __CUDA_ARCH__ >= 530 __device__ inline float reciprocal_square_root(__half x) { return hrsqrt(x); } #endif // __CUDA_ARCH__ >= 530 __device__ inline float reciprocal_square_root(float x) { return rsqrtf(x); } __device__ inline double reciprocal_square_root(double x) { return rsqrt(x); } namespace lbann { namespace batch_normalization_cuda { template <int block_size> __global__ void channel_sums_and_sqsums_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_data, int data_ldim, DataType * __restrict__ global_sums, DataType * __restrict__ global_sqsums) { // Indices const int tid = threadIdx.x; const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Initialize shared memory __shared__ DataType shared_sums[block_size]; __shared__ DataType shared_sqsums[block_size]; // Compute row sums in shared memory DataType sum = DataType(0); DataType sqsum = DataType(0); if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_data[row + col * data_ldim]; sum += x; sqsum += x * x; } } shared_sums[tid] = sum; shared_sqsums[tid] = sqsum; // Compute channel sum with shared memory reduction // TODO: unroll loops for(int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; shared_sqsums[tid] += shared_sqsums[tid + stride]; } } // Output channel sum to global memory if(tid == 0) { atomic_add(&global_sums[bidy], shared_sums[0]); atomic_add(&global_sqsums[bidy], shared_sqsums[0]); } } void channel_sums_and_sqsums(int height, int width, int num_channels, const DataType *data_d, int data_ldim, DataType *sums_d, DataType *sqsums_d, hipStream_t stream) { // CUDA block size const int block_size = 256; // Clear GPU memory CHECK_CUDA(hipMemsetAsync(sums_d, 0, num_channels * 
sizeof(DataType), stream)); CHECK_CUDA(hipMemsetAsync(sqsums_d, 0, num_channels * sizeof(DataType), stream)); // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel to compute sums and sums of squares const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( channel_sums_and_sqsums_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, height, width, channel_size, data_d, data_ldim, sums_d, sqsums_d); } __global__ void sums_to_statistics_kernel( int num_entries, DataType samples_per_sum, DataType decay, DataType * __restrict__ global_mean, DataType * __restrict__ global_var, DataType * __restrict__ global_running_mean, DataType * __restrict__ global_running_var) { int gid = threadIdx.x + blockIdx.x * blockDim.x; while(gid < num_entries) { // Compute statistics const DataType mean = global_mean[gid] / samples_per_sum; const DataType sqmean = global_var[gid] / samples_per_sum; DataType var = sqmean - mean * mean; var = var > DataType(0) ? 
var : DataType(0); var *= samples_per_sum / (samples_per_sum - DataType(1)); global_mean[gid] = mean; global_var[gid] = var; // Compute running statistics DataType& running_mean = global_running_mean[gid]; DataType& running_var = global_running_var[gid]; running_mean = decay * running_mean + (DataType(1) - decay) * mean; running_var = decay * running_var + (DataType(1) - decay) * var; gid += blockDim.x * gridDim.x; } } void sums_to_statistics(int num_entries, int samples_per_sum, DataType decay, DataType *mean_d, DataType *var_d, DataType *running_mean_d, DataType *running_var_d, hipStream_t stream) { dim3 block_dims, grid_dims; block_dims.x = 256; grid_dims.x = (num_entries + block_dims.x - 1) / block_dims.x; hipLaunchKernelGGL(( sums_to_statistics_kernel) , dim3(grid_dims), dim3(block_dims), 0, stream, num_entries, (DataType)samples_per_sum, decay, mean_d, var_d, running_mean_d, running_var_d); } template <int block_size> __global__ void batch_normalization_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, const DataType * __restrict__ global_bias, DataType * __restrict__ global_output, int output_ldim) { // Indices const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const DataType scale = global_scale[bidy]; const DataType bias = global_bias[bidy]; // Get reciprocal of standard deviation const DataType inv_stdev = reciprocal_square_root(var + epsilon); // Apply batch normalization if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType xhat = (x - mean) * inv_stdev; 
const DataType y = scale * xhat + bias; global_output[row + col * output_ldim] = y; } } } void batch_normalization(int height, int width, int num_channels, const DataType *input_d, int input_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, const DataType *bias_d, DataType *output_d, int output_ldim, hipStream_t stream) { // CUDA block size const int block_size = 256; // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel to apply batch normalization const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( batch_normalization_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, height, width, channel_size, input_d, input_ldim, mean_d, var_d, epsilon, scale_d, bias_d, output_d, output_ldim); } template <int block_size> __global__ void batch_normalization_backprop1_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, DataType * __restrict__ global_dscale, DataType * __restrict__ global_dbias, DataType * __restrict__ global_dmean, DataType * __restrict__ global_dvar) { // Indices const int tid = threadIdx.x; const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Initialize shared memory __shared__ DataType shared_dscale[block_size]; __shared__ DataType shared_dbias[block_size]; __shared__ DataType shared_dmean[block_size]; __shared__ DataType shared_dvar[block_size]; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const 
DataType scale = global_scale[bidy]; // Compute useful constants const DataType inv_stdev = reciprocal_square_root(var + epsilon); const DataType dvar_factor = inv_stdev * inv_stdev * inv_stdev / 2; // Compute row-wise gradient contributions in shared memory DataType dscale = DataType(0); DataType dbias = DataType(0); DataType dmean = DataType(0); DataType dvar = DataType(0); if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType xhat = (x - mean) * inv_stdev; const DataType dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dscale += dy * xhat; dbias += dy; const DataType dxhat = dy * scale; dmean += - dxhat * inv_stdev; dvar += - dxhat * (x - mean) * dvar_factor; } } shared_dscale[tid] = dscale; shared_dbias[tid] = dbias; shared_dmean[tid] = dmean; shared_dvar[tid] = dvar; // Compute gradients with shared memory reduction // TODO: unroll loops for(int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_dscale[tid] += shared_dscale[tid + stride]; shared_dbias[tid] += shared_dbias[tid + stride]; shared_dmean[tid] += shared_dmean[tid + stride]; shared_dvar[tid] += shared_dvar[tid + stride]; } } // Output channel sum to global memory if(tid == 0) { atomic_add(&global_dscale[bidy], shared_dscale[0]); atomic_add(&global_dbias[bidy], shared_dbias[0]); atomic_add(&global_dmean[bidy], shared_dmean[0]); atomic_add(&global_dvar[bidy], shared_dvar[0]); } } void batch_normalization_backprop1(int height, int width, int num_channels, const DataType *input_d, int input_ldim, const DataType *gradient_wrt_output_d, int gradient_wrt_output_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, DataType *dscale_d, DataType *dbias_d, DataType *dmean_d, DataType *dvar_d, hipStream_t stream) { // CUDA block size const int block_size = 256; // Clear GPU memory 
CHECK_CUDA(hipMemsetAsync(dscale_d, 0, num_channels * sizeof(DataType), stream)); CHECK_CUDA(hipMemsetAsync(dbias_d, 0, num_channels * sizeof(DataType), stream)); CHECK_CUDA(hipMemsetAsync(dmean_d, 0, num_channels * sizeof(DataType), stream)); CHECK_CUDA(hipMemsetAsync(dvar_d, 0, num_channels * sizeof(DataType), stream)); // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel for first phase of batch normalization backward propagation const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( batch_normalization_backprop1_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, height, width, channel_size, input_d, input_ldim, gradient_wrt_output_d, gradient_wrt_output_ldim, mean_d, var_d, epsilon, scale_d, dscale_d, dbias_d, dmean_d, dvar_d); } template <int block_size> __global__ void batch_normalization_backprop2_kernel( int height, int local_width, int global_width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, const DataType * __restrict__ global_dmean, const DataType * __restrict__ global_dvar, DataType * __restrict__ global_gradient_wrt_input, int gradient_wrt_input_ldim) { // Indices const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const DataType scale = global_scale[bidy]; const DataType dmean = global_dmean[bidy]; const DataType dvar = global_dvar[bidy]; // Compute useful constants const DataType inv_stdev = reciprocal_square_root(var + 
epsilon); const DataType dmean_term = dmean / (global_width * channel_size); const DataType dvar_term = dvar * 2 / (global_width * channel_size - 1); // Apply batch normalization if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < local_width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; const DataType dxhat = dy * scale; DataType dx = dxhat * inv_stdev; dx += dmean_term; dx += dvar_term * (x - mean); global_gradient_wrt_input[row + col * gradient_wrt_input_ldim] += dx; } } } void batch_normalization_backprop2(int height, int local_width, int global_width, int num_channels, const DataType *input_d, int input_ldim, const DataType *gradient_wrt_output_d, int gradient_wrt_output_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, const DataType *dmean_d, const DataType *dvar_d, DataType *gradient_wrt_input_d, int gradient_wrt_input_ldim, hipStream_t stream) { // CUDA block size const int block_size = 256; // Return if there is no input data if(local_width <= 0) return; // Launch CUDA kernel for second phase of batch normalization backward propagation const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; hipLaunchKernelGGL(( batch_normalization_backprop2_kernel<block_size>) , dim3(grid_dims), dim3(block_dims), 0, stream, height, local_width, global_width, channel_size, input_d, input_ldim, gradient_wrt_output_d, gradient_wrt_output_ldim, mean_d, var_d, epsilon, scale_d, dmean_d, dvar_d, gradient_wrt_input_d, gradient_wrt_input_ldim); } } // namespace batch_normalization } // namespace lbann
524cd01f80c7f55cc174cf8d2e9711d778d8ba68.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. 
// // batch_normalization.cu - GPU helper routines for batch normalization layer //////////////////////////////////////////////////////////////////////////////// #include "math.h" #include <iostream> #include "lbann/layers/regularizers/batch_normalization_cuda.hpp" #include "lbann/utils/exception.hpp" // Macros to check CUDA calls #define FORCE_CHECK_CUDA(cuda_call) \ do { \ const cudaError_t status = cuda_call; \ if (status != cudaSuccess) { \ std::cerr << "CUDA error: " << cudaGetErrorString(status) << "\n"; \ std::cerr << "Error at " << __FILE__ << ":" << __LINE__ << "\n"; \ cudaDeviceReset(); \ throw lbann::lbann_exception("CUDA error"); \ } \ } while (0) #ifdef LBANN_DEBUG #define CHECK_CUDA(cuda_call) FORCE_CHECK_CUDA(cuda_call) #else #define CHECK_CUDA(cuda_call) cuda_call #endif // #ifdef LBANN_DEBUG // Atomic add functions #if __CUDA_ARCH__ >= 530 __device__ inline __half atomic_add(__half* address, __half val) { #if 0 // TODO: replace this once Nvidia implements atomicAdd for __half return atomicAdd(address, val); #else unsigned int* address_as_uint = (unsigned int*) address; unsigned int old = *address_as_uint; __half* old_as_half = (__half*) &old; unsigned int assumed; unsigned int updated; __half* updated_as_half = (__half*) &updated; do { assumed = old; updated = old; *updated_as_half += value; old = atomicCAS(address_as_uint, assumed, updated); } while (assumed != old); return *old_as_half; #endif // 0 } #endif // __CUDA_ARCH__ >= 530 __device__ inline float atomic_add(float* address, float val) { return atomicAdd(address, val); } __device__ inline double atomic_add(double* address, double val) { #if __CUDA_ARCH__ >= 600 return atomicAdd(address, val); #else unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return 
__longlong_as_double(old); #endif // __CUDA_ARCH__ < 600 } // Reciprocal square root functions #if __CUDA_ARCH__ >= 530 __device__ inline float reciprocal_square_root(__half x) { return hrsqrt(x); } #endif // __CUDA_ARCH__ >= 530 __device__ inline float reciprocal_square_root(float x) { return rsqrtf(x); } __device__ inline double reciprocal_square_root(double x) { return rsqrt(x); } namespace lbann { namespace batch_normalization_cuda { template <int block_size> __global__ void channel_sums_and_sqsums_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_data, int data_ldim, DataType * __restrict__ global_sums, DataType * __restrict__ global_sqsums) { // Indices const int tid = threadIdx.x; const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Initialize shared memory __shared__ DataType shared_sums[block_size]; __shared__ DataType shared_sqsums[block_size]; // Compute row sums in shared memory DataType sum = DataType(0); DataType sqsum = DataType(0); if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_data[row + col * data_ldim]; sum += x; sqsum += x * x; } } shared_sums[tid] = sum; shared_sqsums[tid] = sqsum; // Compute channel sum with shared memory reduction // TODO: unroll loops for(int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_sums[tid] += shared_sums[tid + stride]; shared_sqsums[tid] += shared_sqsums[tid + stride]; } } // Output channel sum to global memory if(tid == 0) { atomic_add(&global_sums[bidy], shared_sums[0]); atomic_add(&global_sqsums[bidy], shared_sqsums[0]); } } void channel_sums_and_sqsums(int height, int width, int num_channels, const DataType *data_d, int data_ldim, DataType *sums_d, DataType *sqsums_d, cudaStream_t stream) { // CUDA block size const int block_size = 256; // Clear GPU memory CHECK_CUDA(cudaMemsetAsync(sums_d, 0, num_channels * 
sizeof(DataType), stream)); CHECK_CUDA(cudaMemsetAsync(sqsums_d, 0, num_channels * sizeof(DataType), stream)); // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel to compute sums and sums of squares const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; channel_sums_and_sqsums_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>> (height, width, channel_size, data_d, data_ldim, sums_d, sqsums_d); } __global__ void sums_to_statistics_kernel( int num_entries, DataType samples_per_sum, DataType decay, DataType * __restrict__ global_mean, DataType * __restrict__ global_var, DataType * __restrict__ global_running_mean, DataType * __restrict__ global_running_var) { int gid = threadIdx.x + blockIdx.x * blockDim.x; while(gid < num_entries) { // Compute statistics const DataType mean = global_mean[gid] / samples_per_sum; const DataType sqmean = global_var[gid] / samples_per_sum; DataType var = sqmean - mean * mean; var = var > DataType(0) ? 
var : DataType(0); var *= samples_per_sum / (samples_per_sum - DataType(1)); global_mean[gid] = mean; global_var[gid] = var; // Compute running statistics DataType& running_mean = global_running_mean[gid]; DataType& running_var = global_running_var[gid]; running_mean = decay * running_mean + (DataType(1) - decay) * mean; running_var = decay * running_var + (DataType(1) - decay) * var; gid += blockDim.x * gridDim.x; } } void sums_to_statistics(int num_entries, int samples_per_sum, DataType decay, DataType *mean_d, DataType *var_d, DataType *running_mean_d, DataType *running_var_d, cudaStream_t stream) { dim3 block_dims, grid_dims; block_dims.x = 256; grid_dims.x = (num_entries + block_dims.x - 1) / block_dims.x; sums_to_statistics_kernel <<<grid_dims, block_dims, 0, stream>>> (num_entries, (DataType)samples_per_sum, decay, mean_d, var_d, running_mean_d, running_var_d); } template <int block_size> __global__ void batch_normalization_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, const DataType * __restrict__ global_bias, DataType * __restrict__ global_output, int output_ldim) { // Indices const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const DataType scale = global_scale[bidy]; const DataType bias = global_bias[bidy]; // Get reciprocal of standard deviation const DataType inv_stdev = reciprocal_square_root(var + epsilon); // Apply batch normalization if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType xhat = (x - mean) * inv_stdev; const DataType y = scale * xhat + 
bias; global_output[row + col * output_ldim] = y; } } } void batch_normalization(int height, int width, int num_channels, const DataType *input_d, int input_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, const DataType *bias_d, DataType *output_d, int output_ldim, cudaStream_t stream) { // CUDA block size const int block_size = 256; // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel to apply batch normalization const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; batch_normalization_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>> (height, width, channel_size, input_d, input_ldim, mean_d, var_d, epsilon, scale_d, bias_d, output_d, output_ldim); } template <int block_size> __global__ void batch_normalization_backprop1_kernel( int height, int width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, DataType * __restrict__ global_dscale, DataType * __restrict__ global_dbias, DataType * __restrict__ global_dmean, DataType * __restrict__ global_dvar) { // Indices const int tid = threadIdx.x; const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Initialize shared memory __shared__ DataType shared_dscale[block_size]; __shared__ DataType shared_dbias[block_size]; __shared__ DataType shared_dmean[block_size]; __shared__ DataType shared_dvar[block_size]; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const DataType scale = global_scale[bidy]; // Compute useful constants 
const DataType inv_stdev = reciprocal_square_root(var + epsilon); const DataType dvar_factor = inv_stdev * inv_stdev * inv_stdev / 2; // Compute row-wise gradient contributions in shared memory DataType dscale = DataType(0); DataType dbias = DataType(0); DataType dmean = DataType(0); DataType dvar = DataType(0); if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType xhat = (x - mean) * inv_stdev; const DataType dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; dscale += dy * xhat; dbias += dy; const DataType dxhat = dy * scale; dmean += - dxhat * inv_stdev; dvar += - dxhat * (x - mean) * dvar_factor; } } shared_dscale[tid] = dscale; shared_dbias[tid] = dbias; shared_dmean[tid] = dmean; shared_dvar[tid] = dvar; // Compute gradients with shared memory reduction // TODO: unroll loops for(int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if(tid < stride) { shared_dscale[tid] += shared_dscale[tid + stride]; shared_dbias[tid] += shared_dbias[tid + stride]; shared_dmean[tid] += shared_dmean[tid + stride]; shared_dvar[tid] += shared_dvar[tid + stride]; } } // Output channel sum to global memory if(tid == 0) { atomic_add(&global_dscale[bidy], shared_dscale[0]); atomic_add(&global_dbias[bidy], shared_dbias[0]); atomic_add(&global_dmean[bidy], shared_dmean[0]); atomic_add(&global_dvar[bidy], shared_dvar[0]); } } void batch_normalization_backprop1(int height, int width, int num_channels, const DataType *input_d, int input_ldim, const DataType *gradient_wrt_output_d, int gradient_wrt_output_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, DataType *dscale_d, DataType *dbias_d, DataType *dmean_d, DataType *dvar_d, cudaStream_t stream) { // CUDA block size const int block_size = 256; // Clear GPU memory CHECK_CUDA(cudaMemsetAsync(dscale_d, 0, num_channels * 
sizeof(DataType), stream)); CHECK_CUDA(cudaMemsetAsync(dbias_d, 0, num_channels * sizeof(DataType), stream)); CHECK_CUDA(cudaMemsetAsync(dmean_d, 0, num_channels * sizeof(DataType), stream)); CHECK_CUDA(cudaMemsetAsync(dvar_d, 0, num_channels * sizeof(DataType), stream)); // Return if there is no input data if(width <= 0) return; // Launch CUDA kernel for first phase of batch normalization backward propagation const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; batch_normalization_backprop1_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>> (height, width, channel_size, input_d, input_ldim, gradient_wrt_output_d, gradient_wrt_output_ldim, mean_d, var_d, epsilon, scale_d, dscale_d, dbias_d, dmean_d, dvar_d); } template <int block_size> __global__ void batch_normalization_backprop2_kernel( int height, int local_width, int global_width, int channel_size, const DataType * __restrict__ global_input, int input_ldim, const DataType * __restrict__ global_gradient_wrt_output, int gradient_wrt_output_ldim, const DataType * __restrict__ global_mean, const DataType * __restrict__ global_var, DataType epsilon, const DataType * __restrict__ global_scale, const DataType * __restrict__ global_dmean, const DataType * __restrict__ global_dvar, DataType * __restrict__ global_gradient_wrt_input, int gradient_wrt_input_ldim) { // Indices const int gidx = threadIdx.x + blockIdx.x * blockDim.x; const int bidy = blockIdx.y; // Copy batch normalization parameters to private memory const DataType mean = global_mean[bidy]; const DataType var = global_var[bidy]; const DataType scale = global_scale[bidy]; const DataType dmean = global_dmean[bidy]; const DataType dvar = global_dvar[bidy]; // Compute useful constants const DataType inv_stdev = reciprocal_square_root(var + epsilon); const DataType dmean_term = dmean / (global_width * channel_size); 
const DataType dvar_term = dvar * 2 / (global_width * channel_size - 1); // Apply batch normalization if(gidx < channel_size) { const int row = gidx + bidy * channel_size; for(int col = 0; col < local_width; ++col) { const DataType x = global_input[row + col * input_ldim]; const DataType dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; const DataType dxhat = dy * scale; DataType dx = dxhat * inv_stdev; dx += dmean_term; dx += dvar_term * (x - mean); global_gradient_wrt_input[row + col * gradient_wrt_input_ldim] += dx; } } } void batch_normalization_backprop2(int height, int local_width, int global_width, int num_channels, const DataType *input_d, int input_ldim, const DataType *gradient_wrt_output_d, int gradient_wrt_output_ldim, const DataType *mean_d, const DataType *var_d, DataType epsilon, const DataType *scale_d, const DataType *dmean_d, const DataType *dvar_d, DataType *gradient_wrt_input_d, int gradient_wrt_input_ldim, cudaStream_t stream) { // CUDA block size const int block_size = 256; // Return if there is no input data if(local_width <= 0) return; // Launch CUDA kernel for second phase of batch normalization backward propagation const int channel_size = height / num_channels; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (channel_size + block_size - 1) / block_size; grid_dims.y = num_channels; batch_normalization_backprop2_kernel<block_size> <<<grid_dims, block_dims, 0, stream>>> (height, local_width, global_width, channel_size, input_d, input_ldim, gradient_wrt_output_d, gradient_wrt_output_ldim, mean_d, var_d, epsilon, scale_d, dmean_d, dvar_d, gradient_wrt_input_d, gradient_wrt_input_ldim); } } // namespace batch_normalization } // namespace lbann
340729f41d6fead01211c75d0a547be67a8ad1ad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CsvFunctions.h" #include <algorithm> __global__ void distanceForEuclidean(double *data, double *test, int *size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < *size) { if (idx < *size){ int tempIdx = idx % 13; double diff = data[idx] - test[tempIdx]; data[idx] = diff * diff; } idx += blockDim.x * gridDim.x; } } class Knn { private: int k_numbers; int metric; int targetColumn; int threadNum; public: vector<vector<double>> trainData; vector<vector<double>> learningData; Knn(int k = 1, int m = 1, int thrd = 1) { k_numbers = k; metric = m; targetColumn = 0; threadNum = thrd; } void setMetric(int number) { metric = number; } void setK(int k) { k_numbers = k; } void loadData(string file, int targetColumnNumber, int trainingPercent = 30) { targetColumn = targetColumnNumber - 1; vector<vector<double>> data = readFromCsvWithoutLabels(file); std::random_shuffle(data.begin(), data.end()); int startIndex = (trainingPercent / 100.0) * data.size(); vector<vector<double>> train(data.end() - startIndex, data.begin() + data.size()); data.erase(data.end() - startIndex, data.begin() + data.size()); learningData = data; trainData = train; } int predict(vector<double> features) { vector<pair<double, int>> distancesAndLabels = {}; int size = learningData.size() * (learningData[0].size() - 1); double * tempData = new double[size]; double * tempLabels = new double[learningData.size()]; double * tempTestData = new double[learningData[0].size() - 1]; for (int i = 0; i < learningData.size(); ++i){ for( int j = 0; j < learningData[0].size(); ++j){ if (j != 13) { tempData[i*13+j] = learningData[i][j]; } else { tempLabels[i] = learningData[i][j]; } } } for (int i = 0; i < features.size(); ++i) { tempTestData[i] = features[i]; } double * cudaData; double * cudaTestValues; int * dataSize; hipMalloc( (void**)&cudaData, sizeof(double) * size ) ; hipMalloc( (void**)&cudaTestValues, 
sizeof(double) * 13); hipMalloc( (void**)&dataSize, sizeof(int)); hipMemcpy( dataSize, &size, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( cudaData, tempData, sizeof(double) * size, hipMemcpyHostToDevice ); hipMemcpy( cudaTestValues, tempTestData, sizeof(double) * 13, hipMemcpyHostToDevice ); int num_blocks = ceil(size /1000) + 1; hipLaunchKernelGGL(( distanceForEuclidean), dim3(num_blocks), dim3(1000), 0, 0, cudaData, cudaTestValues, dataSize); hipMemcpy( tempData, cudaData, sizeof(double) * size, hipMemcpyDeviceToHost ); hipFree( cudaData ); hipFree( cudaTestValues ); hipDeviceSynchronize(); for (int i = 0; i < learningData.size(); ++i) { double sum = 0; for( int j = 0; j < learningData[0].size() - 1; ++j){ sum += tempData[i*13 + j]; } double euclidean = sqrt(sum); distancesAndLabels.push_back({ sum, tempLabels[i] }); } sort(distancesAndLabels.begin(), distancesAndLabels.end()); vector<int> nearestResults = {0, 0}; for (int i = 0; i < k_numbers; ++i) { nearestResults[(int)distancesAndLabels[i].second]++; } delete tempData; delete tempLabels; delete tempTestData; if (nearestResults[0] > nearestResults[1]) { return 0; } else { return 1; } } double checkAccuracy() { int good = 0; int bad = 0; for (int i = 0; i < 10; ++i) { int predictedTarget = predict(trainData[i]); if (predictedTarget == trainData[i][targetColumn]) { ++good; } else { ++bad; } } return good / (double)(good + bad); } double euclideanDistance(vector<double> learning, vector<double> target) { vector<double> distanceSquares = {}; double euclideanDistance = 0; for (int i = 0; i < learning.size(); ++i) { if (i != targetColumn) { double diff = learning[i] - target[i]; distanceSquares.push_back(diff * diff); } } for (int i = 0; i < distanceSquares.size(); ++i) { euclideanDistance += distanceSquares[i]; } euclideanDistance = sqrt(euclideanDistance); return euclideanDistance; } }; int main(int argc, char* argv[]) { Knn* knn = new Knn(5,0); knn->loadData("dataset/bigheartdata.csv", 14, 30); double accuracy 
= knn->checkAccuracy(); cout << endl << accuracy; delete knn; }
340729f41d6fead01211c75d0a547be67a8ad1ad.cu
#include "CsvFunctions.h" #include <algorithm> __global__ void distanceForEuclidean(double *data, double *test, int *size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < *size) { if (idx < *size){ int tempIdx = idx % 13; double diff = data[idx] - test[tempIdx]; data[idx] = diff * diff; } idx += blockDim.x * gridDim.x; } } class Knn { private: int k_numbers; int metric; int targetColumn; int threadNum; public: vector<vector<double>> trainData; vector<vector<double>> learningData; Knn(int k = 1, int m = 1, int thrd = 1) { k_numbers = k; metric = m; targetColumn = 0; threadNum = thrd; } void setMetric(int number) { metric = number; } void setK(int k) { k_numbers = k; } void loadData(string file, int targetColumnNumber, int trainingPercent = 30) { targetColumn = targetColumnNumber - 1; vector<vector<double>> data = readFromCsvWithoutLabels(file); std::random_shuffle(data.begin(), data.end()); int startIndex = (trainingPercent / 100.0) * data.size(); vector<vector<double>> train(data.end() - startIndex, data.begin() + data.size()); data.erase(data.end() - startIndex, data.begin() + data.size()); learningData = data; trainData = train; } int predict(vector<double> features) { vector<pair<double, int>> distancesAndLabels = {}; int size = learningData.size() * (learningData[0].size() - 1); double * tempData = new double[size]; double * tempLabels = new double[learningData.size()]; double * tempTestData = new double[learningData[0].size() - 1]; for (int i = 0; i < learningData.size(); ++i){ for( int j = 0; j < learningData[0].size(); ++j){ if (j != 13) { tempData[i*13+j] = learningData[i][j]; } else { tempLabels[i] = learningData[i][j]; } } } for (int i = 0; i < features.size(); ++i) { tempTestData[i] = features[i]; } double * cudaData; double * cudaTestValues; int * dataSize; cudaMalloc( (void**)&cudaData, sizeof(double) * size ) ; cudaMalloc( (void**)&cudaTestValues, sizeof(double) * 13); cudaMalloc( (void**)&dataSize, sizeof(int)); cudaMemcpy( dataSize, 
&size, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( cudaData, tempData, sizeof(double) * size, cudaMemcpyHostToDevice ); cudaMemcpy( cudaTestValues, tempTestData, sizeof(double) * 13, cudaMemcpyHostToDevice ); int num_blocks = ceil(size /1000) + 1; distanceForEuclidean<<<num_blocks, 1000>>>(cudaData, cudaTestValues, dataSize); cudaMemcpy( tempData, cudaData, sizeof(double) * size, cudaMemcpyDeviceToHost ); cudaFree( cudaData ); cudaFree( cudaTestValues ); cudaDeviceSynchronize(); for (int i = 0; i < learningData.size(); ++i) { double sum = 0; for( int j = 0; j < learningData[0].size() - 1; ++j){ sum += tempData[i*13 + j]; } double euclidean = sqrt(sum); distancesAndLabels.push_back({ sum, tempLabels[i] }); } sort(distancesAndLabels.begin(), distancesAndLabels.end()); vector<int> nearestResults = {0, 0}; for (int i = 0; i < k_numbers; ++i) { nearestResults[(int)distancesAndLabels[i].second]++; } delete tempData; delete tempLabels; delete tempTestData; if (nearestResults[0] > nearestResults[1]) { return 0; } else { return 1; } } double checkAccuracy() { int good = 0; int bad = 0; for (int i = 0; i < 10; ++i) { int predictedTarget = predict(trainData[i]); if (predictedTarget == trainData[i][targetColumn]) { ++good; } else { ++bad; } } return good / (double)(good + bad); } double euclideanDistance(vector<double> learning, vector<double> target) { vector<double> distanceSquares = {}; double euclideanDistance = 0; for (int i = 0; i < learning.size(); ++i) { if (i != targetColumn) { double diff = learning[i] - target[i]; distanceSquares.push_back(diff * diff); } } for (int i = 0; i < distanceSquares.size(); ++i) { euclideanDistance += distanceSquares[i]; } euclideanDistance = sqrt(euclideanDistance); return euclideanDistance; } }; int main(int argc, char* argv[]) { Knn* knn = new Knn(5,0); knn->loadData("dataset/bigheartdata.csv", 14, 30); double accuracy = knn->checkAccuracy(); cout << endl << accuracy; delete knn; }
38f59c4a526a27298bef037826f2c59c53a69fe2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Modified from // https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu #include <stdio.h> #include <stdlib.h> #include "furthest_point_sample_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" inline int opt_n_threads(int work_size) { #if defined(__ILUVATAR__) const int pow_2 = ::log(static_cast<float>(work_size)) / ::log(2.0); #else const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0); #endif return ::max(::min(1 << pow_2, 1024), 1); } void FurthestPointSamplingForwardCUDAKernelLauncher(int b, int n, int m, const float* dataset, float* temp, int* idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<1024>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 512: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<512>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<256>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<128>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<64>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 32: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<32>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<16>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); 
break; case 8: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<8>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<4>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 2: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<2>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<1>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_forward_cuda_kernel<512>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); } AT_CUDA_CHECK(hipGetLastError()); } void FurthestPointSamplingWithDistForwardCUDAKernelLauncher( int b, int n, int m, const float* dataset, float* temp, int* idxs) { // dataset: (B, N, N) // temp: (B, N) // output: // idx: (B, M) hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<1024>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 512: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<512>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<256>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<128>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<64>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 32: 
hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<32>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<16>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 8: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<8>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<4>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 2: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<2>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<1>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_forward_cuda_kernel<512>) , dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); } AT_CUDA_CHECK(hipGetLastError()); }
38f59c4a526a27298bef037826f2c59c53a69fe2.cu
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
#include <stdio.h>
#include <stdlib.h>

#include "furthest_point_sample_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"

// Pick a power-of-two block size for a reduction over `work_size` elements,
// clamped to [1, 1024]. floor(log2(work_size)) is computed via log ratios.
// NOTE(review): work_size <= 0 would yield log of a non-positive number —
// callers appear to pass n >= 1; confirm upstream.
inline int opt_n_threads(int work_size) {
#if defined(__ILUVATAR__)
  const int pow_2 = std::log(static_cast<float>(work_size)) / std::log(2.0);
#else
  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
#endif
  return std::max(std::min(1 << pow_2, 1024), 1);
}

// Launches the furthest-point-sampling kernel with a block size chosen by
// opt_n_threads(n). One block per batch element; the template parameter must
// equal the runtime block size, hence the switch dispatch.
void FurthestPointSamplingForwardCUDAKernelLauncher(int b, int n, int m,
                                                    const float* dataset,
                                                    float* temp, int* idxs) {
  // dataset: (B, N, 3)
  // tmp: (B, N)
  // output:
  //   idx: (B, M)
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  unsigned int n_threads = opt_n_threads(n);

  switch (n_threads) {
    case 1024:
      furthest_point_sampling_forward_cuda_kernel<1024>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 512:
      furthest_point_sampling_forward_cuda_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_forward_cuda_kernel<256>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_forward_cuda_kernel<128>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_forward_cuda_kernel<64>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_forward_cuda_kernel<32>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_forward_cuda_kernel<16>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_forward_cuda_kernel<8>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_forward_cuda_kernel<4>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_forward_cuda_kernel<2>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_forward_cuda_kernel<1>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      // Unreachable in practice: opt_n_threads only returns the powers of two
      // above. Falls back to a 512-wide template instantiation.
      furthest_point_sampling_forward_cuda_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }

  AT_CUDA_CHECK(cudaGetLastError());
}

// Same dispatch as above but for the precomputed-distance-matrix variant:
// `dataset` here is a (B, N, N) pairwise distance matrix.
void FurthestPointSamplingWithDistForwardCUDAKernelLauncher(
    int b, int n, int m, const float* dataset, float* temp, int* idxs) {
  // dataset: (B, N, N)
  // temp: (B, N)
  // output:
  //   idx: (B, M)
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  unsigned int n_threads = opt_n_threads(n);

  switch (n_threads) {
    case 1024:
      furthest_point_sampling_with_dist_forward_cuda_kernel<1024>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 512:
      furthest_point_sampling_with_dist_forward_cuda_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 256:
      furthest_point_sampling_with_dist_forward_cuda_kernel<256>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 128:
      furthest_point_sampling_with_dist_forward_cuda_kernel<128>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 64:
      furthest_point_sampling_with_dist_forward_cuda_kernel<64>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 32:
      furthest_point_sampling_with_dist_forward_cuda_kernel<32>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 16:
      furthest_point_sampling_with_dist_forward_cuda_kernel<16>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 8:
      furthest_point_sampling_with_dist_forward_cuda_kernel<8>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 4:
      furthest_point_sampling_with_dist_forward_cuda_kernel<4>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 2:
      furthest_point_sampling_with_dist_forward_cuda_kernel<2>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    case 1:
      furthest_point_sampling_with_dist_forward_cuda_kernel<1>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
      break;
    default:
      // Unreachable in practice (see opt_n_threads); 512-wide fallback.
      furthest_point_sampling_with_dist_forward_cuda_kernel<512>
          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
  }

  AT_CUDA_CHECK(cudaGetLastError());
}
0fac2d8c0a01c9017460ba9a54f1526dca38a104.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
 *  Copyright (c) 2020 by Contributors
 * \file array/cuda/coo_sort.cc
 * \brief Sort COO index
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

///////////////////////////// COOSort_ /////////////////////////////

// Generic fallback: only the GPU int32/int64 specializations below are valid.
template <DLDeviceType XPU, typename IdType>
void COOSort_(COOMatrix* coo, bool sort_column) {
  LOG(FATAL) << "Unreachable codes";
}

// In-place sort of an int32 COO matrix by row (and optionally by column
// within each row) using hipSPARSE. `coo->data` is created as a 0..nnz-1
// range first if absent, so after sorting it maps sorted positions back to
// the original entry order.
template <>
void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column) {
  // TODO(minjie): Current implementation is based on cusparse which only supports
  //   int32_t. To support int64_t, we could use the Radix sort algorithm provided
  //   by CUB.
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(coo->row->ctx);
  // allocate cusparse handle if needed
  if (!thr_entry->cusparse_handle) {
    CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
  }
  CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
  NDArray row = coo->row;
  NDArray col = coo->col;
  if (!aten::COOHasData(*coo))
    coo->data = aten::Range(0, row->shape[0], row->dtype.bits, row->ctx);
  NDArray data = coo->data;
  int32_t* row_ptr = static_cast<int32_t*>(row->data);
  int32_t* col_ptr = static_cast<int32_t*>(col->data);
  int32_t* data_ptr = static_cast<int32_t*>(data->data);
  // sort row: query workspace size, allocate, sort, free.
  size_t workspace_size = 0;
  CUSPARSE_CALL(hipsparseXcoosort_bufferSizeExt(
      thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
      row_ptr, col_ptr, &workspace_size));
  void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
  CUSPARSE_CALL(hipsparseXcoosortByRow(
      thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
      row_ptr, col_ptr, data_ptr, workspace));
  device->FreeWorkspace(row->ctx, workspace);
  if (sort_column) {
    // First create a row indptr array and then call csrsort, which sorts the
    // columns (and permutes data) within each row segment.
    int32_t* indptr = static_cast<int32_t*>(
        device->AllocWorkspace(row->ctx, (coo->num_rows + 1) * sizeof(int32_t)));
    CUSPARSE_CALL(hipsparseXcoo2csr(
        thr_entry->cusparse_handle, row_ptr, row->shape[0], coo->num_rows,
        indptr, HIPSPARSE_INDEX_BASE_ZERO));
    CUSPARSE_CALL(hipsparseXcsrsort_bufferSizeExt(
        thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
        indptr, col_ptr, &workspace_size));
    // Shadows the (already freed) outer workspace on purpose.
    void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
    hipsparseMatDescr_t descr;
    CUSPARSE_CALL(hipsparseCreateMatDescr(&descr));
    CUSPARSE_CALL(hipsparseXcsrsort(
        thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
        descr, indptr, col_ptr, data_ptr, workspace));
    CUSPARSE_CALL(hipsparseDestroyMatDescr(descr));
    device->FreeWorkspace(row->ctx, workspace);
    device->FreeWorkspace(row->ctx, indptr);
  }
  coo->row_sorted = true;
  coo->col_sorted = sort_column;
}

// int64 path: sorts by the linearized key row * num_cols + col, which yields
// a matrix sorted by both row and column regardless of `sort_column`.
// NOTE(review): row * num_cols + col could overflow for extremely large
// matrices — confirm nnz/key ranges upstream.
template <>
void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column) {
  // Always sort the COO to be both row and column sorted.
  IdArray pos = coo->row * coo->num_cols + coo->col;
  const auto& sorted = Sort(pos);
  coo->row = sorted.first / coo->num_cols;
  coo->col = sorted.first % coo->num_cols;
  if (aten::COOHasData(*coo))
    coo->data = IndexSelect(coo->data, sorted.second);
  else
    coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
  coo->row_sorted = coo->col_sorted = true;
}

template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column);
template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column);

///////////////////////////// COOIsSorted /////////////////////////////

// Grid-stride kernel: flags[i] = 1 iff entry i does not violate row (resp.
// row-major row+col) ordering relative to entry i-1; entry 0 is trivially 1.
template <typename IdType>
__global__ void _COOIsSortedKernel(
    const IdType* row, const IdType* col,
    int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < nnz) {
    if (tx == 0) {
      row_sorted[0] = 1;
      col_sorted[0] = 1;
    } else {
      row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]);
      col_sorted[tx] = static_cast<int8_t>(
          row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]);
    }
    tx += stride_x;
  }
}

// Returns {row_sorted, col_sorted} for the given COO matrix by reducing the
// per-entry flags computed on device. col_sorted is only checked when the
// rows are already sorted.
template <DLDeviceType XPU, typename IdType>
std::pair<bool, bool> COOIsSorted(COOMatrix coo) {
  const int64_t nnz = coo.row->shape[0];
  const auto& ctx = coo.row->ctx;
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(ctx);
  // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but should
  // be fine.
  int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  const int nt = cuda::FindNumThreads(nnz);
  const int nb = (nnz + nt - 1) / nt;
  CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream,
      coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags);
  const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx);
  const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false;
  device->FreeWorkspace(ctx, row_flags);
  device->FreeWorkspace(ctx, col_flags);
  return {row_sorted, col_sorted};
}

template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo);
template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
0fac2d8c0a01c9017460ba9a54f1526dca38a104.cu
/*!
 *  Copyright (c) 2020 by Contributors
 * \file array/cuda/coo_sort.cc
 * \brief Sort COO index
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {

using runtime::NDArray;

namespace aten {
namespace impl {

///////////////////////////// COOSort_ /////////////////////////////

// Generic fallback: only the GPU int32/int64 specializations below are valid.
template <DLDeviceType XPU, typename IdType>
void COOSort_(COOMatrix* coo, bool sort_column) {
  LOG(FATAL) << "Unreachable codes";
}

// In-place sort of an int32 COO matrix by row (and optionally by column
// within each row) using cuSPARSE. `coo->data` is created as a 0..nnz-1
// range first if absent, so after sorting it maps sorted positions back to
// the original entry order.
template <>
void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column) {
  // TODO(minjie): Current implementation is based on cusparse which only supports
  //   int32_t. To support int64_t, we could use the Radix sort algorithm provided
  //   by CUB.
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(coo->row->ctx);
  // allocate cusparse handle if needed
  if (!thr_entry->cusparse_handle) {
    CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
  }
  CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
  NDArray row = coo->row;
  NDArray col = coo->col;
  if (!aten::COOHasData(*coo))
    coo->data = aten::Range(0, row->shape[0], row->dtype.bits, row->ctx);
  NDArray data = coo->data;
  int32_t* row_ptr = static_cast<int32_t*>(row->data);
  int32_t* col_ptr = static_cast<int32_t*>(col->data);
  int32_t* data_ptr = static_cast<int32_t*>(data->data);
  // sort row: query workspace size, allocate, sort, free.
  size_t workspace_size = 0;
  CUSPARSE_CALL(cusparseXcoosort_bufferSizeExt(
      thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
      row_ptr, col_ptr, &workspace_size));
  void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
  CUSPARSE_CALL(cusparseXcoosortByRow(
      thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
      row_ptr, col_ptr, data_ptr, workspace));
  device->FreeWorkspace(row->ctx, workspace);
  if (sort_column) {
    // First create a row indptr array and then call csrsort, which sorts the
    // columns (and permutes data) within each row segment.
    int32_t* indptr = static_cast<int32_t*>(
        device->AllocWorkspace(row->ctx, (coo->num_rows + 1) * sizeof(int32_t)));
    CUSPARSE_CALL(cusparseXcoo2csr(
        thr_entry->cusparse_handle, row_ptr, row->shape[0], coo->num_rows,
        indptr, CUSPARSE_INDEX_BASE_ZERO));
    CUSPARSE_CALL(cusparseXcsrsort_bufferSizeExt(
        thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
        indptr, col_ptr, &workspace_size));
    // Shadows the (already freed) outer workspace on purpose.
    void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
    cusparseMatDescr_t descr;
    CUSPARSE_CALL(cusparseCreateMatDescr(&descr));
    CUSPARSE_CALL(cusparseXcsrsort(
        thr_entry->cusparse_handle, coo->num_rows, coo->num_cols, row->shape[0],
        descr, indptr, col_ptr, data_ptr, workspace));
    CUSPARSE_CALL(cusparseDestroyMatDescr(descr));
    device->FreeWorkspace(row->ctx, workspace);
    device->FreeWorkspace(row->ctx, indptr);
  }
  coo->row_sorted = true;
  coo->col_sorted = sort_column;
}

// int64 path: sorts by the linearized key row * num_cols + col, which yields
// a matrix sorted by both row and column regardless of `sort_column`.
// NOTE(review): row * num_cols + col could overflow for extremely large
// matrices — confirm nnz/key ranges upstream.
template <>
void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column) {
  // Always sort the COO to be both row and column sorted.
  IdArray pos = coo->row * coo->num_cols + coo->col;
  const auto& sorted = Sort(pos);
  coo->row = sorted.first / coo->num_cols;
  coo->col = sorted.first % coo->num_cols;
  if (aten::COOHasData(*coo))
    coo->data = IndexSelect(coo->data, sorted.second);
  else
    coo->data = AsNumBits(sorted.second, coo->row->dtype.bits);
  coo->row_sorted = coo->col_sorted = true;
}

template void COOSort_<kDLGPU, int32_t>(COOMatrix* coo, bool sort_column);
template void COOSort_<kDLGPU, int64_t>(COOMatrix* coo, bool sort_column);

///////////////////////////// COOIsSorted /////////////////////////////

// Grid-stride kernel: flags[i] = 1 iff entry i does not violate row (resp.
// row-major row+col) ordering relative to entry i-1; entry 0 is trivially 1.
template <typename IdType>
__global__ void _COOIsSortedKernel(
    const IdType* row, const IdType* col,
    int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride_x = gridDim.x * blockDim.x;
  while (tx < nnz) {
    if (tx == 0) {
      row_sorted[0] = 1;
      col_sorted[0] = 1;
    } else {
      row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]);
      col_sorted[tx] = static_cast<int8_t>(
          row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]);
    }
    tx += stride_x;
  }
}

// Returns {row_sorted, col_sorted} for the given COO matrix by reducing the
// per-entry flags computed on device. col_sorted is only checked when the
// rows are already sorted.
template <DLDeviceType XPU, typename IdType>
std::pair<bool, bool> COOIsSorted(COOMatrix coo) {
  const int64_t nnz = coo.row->shape[0];
  const auto& ctx = coo.row->ctx;
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  auto device = runtime::DeviceAPI::Get(ctx);
  // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but should
  // be fine.
  int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz));
  const int nt = cuda::FindNumThreads(nnz);
  const int nb = (nnz + nt - 1) / nt;
  CUDA_KERNEL_CALL(_COOIsSortedKernel, nb, nt, 0, thr_entry->stream,
      coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags);
  const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx);
  const bool col_sorted = row_sorted? cuda::AllTrue(col_flags, nnz, ctx) : false;
  device->FreeWorkspace(ctx, row_flags);
  device->FreeWorkspace(ctx, col_flags);
  return {row_sorted, col_sorted};
}

template std::pair<bool, bool> COOIsSorted<kDLGPU, int32_t>(COOMatrix coo);
template std::pair<bool, bool> COOIsSorted<kDLGPU, int64_t>(COOMatrix coo);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
f0f528bca83f8e67e34629494fcea390da259168.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaResize.h"

namespace cu {

// Nearest-neighbour image resize.
// One thread per output pixel. Template parameters select channel count and
// layout: packed/interleaved (HWC) when isPlanar == false, planar (CHW) when
// isPlanar == true.
// `pitch` is the input row stride in elements for the packed layout.
// NOTE(review): the planar path assumes densely packed planes (row stride ==
// iWidth); confirm against callers.
template<typename T, int numChannel, bool isPlanar>
__global__ void gpuRGBLikeResize(T* input, int iWidth, int iHeight, int pitch, float2 scale,
                                 T* output, int oWidth, int oHeight)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= oWidth || y >= oHeight)
        return;

    // Source pixel; truncation acts as floor for the non-negative coords,
    // and scale = in/out guarantees dx < iWidth, dy < iHeight.
    const int dx = ((float)x * scale.x);
    const int dy = ((float)y * scale.y);

    if (isPlanar)
    {
        #pragma unroll
        for (int k = 0; k < numChannel; ++k){
            // BUGFIX: the plane offset was `dx * dy * k`, which is not the
            // size of an image plane and read from the wrong location for
            // every channel k > 0. A plane holds iWidth * iHeight elements.
            output[oWidth * oHeight * k + y * oWidth + x] =
                input[iWidth * iHeight * k + dy * iWidth + dx];
        }
    }
    else
    {
        #pragma unroll
        for (int k = 0; k < numChannel; ++k){
            output[y * oWidth * numChannel + x * numChannel + k] =
                input[dy * pitch + dx * numChannel + k];
        }
    }
}

// Host launcher: dispatches gpuRGBLikeResize on the template instantiation
// matching `inputImageFormat`.
// Returns hipErrorInvalidDevicePointer for null pointers,
// hipErrorInvalidValue for zero dimensions/pitch or unsupported formats,
// otherwise the result of hipGetLastError() after the (async) launch.
template<typename T>
hipError_t cudaResizeRGBLike(T* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , T* output, size_t outputWidth, size_t outputHeight, hipStream_t stream)
{
    if (!input || !output)
        return hipErrorInvalidDevicePointer;

    if (inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 || pitch == 0)
        return hipErrorInvalidValue;

    const float2 scale = make_float2(float(inputWidth) / float(outputWidth),
                                     float(inputHeight) / float(outputHeight));

    // launch kernel: one thread per output pixel.
    const dim3 blockDim(32, 32);
    const dim3 gridDim(iDivUp(outputWidth, blockDim.x), iDivUp(outputHeight, blockDim.y));

    switch (inputImageFormat)
    {
    case ImageFormat::IMAGE_RGB8:
    case ImageFormat::IMAGE_BGR8:
        gpuRGBLikeResize<T, 3, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_RGBA8:
    case ImageFormat::IMAGE_BGRA8:
        gpuRGBLikeResize<T, 4, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_GRAY8:
        gpuRGBLikeResize<T, 1, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_RGB32F_PLANAR:
        gpuRGBLikeResize<T, 3, true> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    default:
        return hipErrorInvalidValue;
    }

    return CUDA(hipGetLastError());
}

template hipError_t cudaResizeRGBLike(unsigned char* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , unsigned char* output, size_t outputWidth, size_t outputHeight, hipStream_t stream);
template hipError_t cudaResizeRGBLike(float* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , float* output, size_t outputWidth, size_t outputHeight, hipStream_t stream);

}
f0f528bca83f8e67e34629494fcea390da259168.cu
#include "cudaResize.h"

namespace cu {

// Nearest-neighbour image resize.
// One thread per output pixel. Template parameters select channel count and
// layout: packed/interleaved (HWC) when isPlanar == false, planar (CHW) when
// isPlanar == true.
// `pitch` is the input row stride in elements for the packed layout.
// NOTE(review): the planar path assumes densely packed planes (row stride ==
// iWidth); confirm against callers.
template<typename T, int numChannel, bool isPlanar>
__global__ void gpuRGBLikeResize(T* input, int iWidth, int iHeight, int pitch, float2 scale,
                                 T* output, int oWidth, int oHeight)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= oWidth || y >= oHeight)
        return;

    // Source pixel; truncation acts as floor for the non-negative coords,
    // and scale = in/out guarantees dx < iWidth, dy < iHeight.
    const int dx = ((float)x * scale.x);
    const int dy = ((float)y * scale.y);

    if (isPlanar)
    {
        #pragma unroll
        for (int k = 0; k < numChannel; ++k){
            // BUGFIX: the plane offset was `dx * dy * k`, which is not the
            // size of an image plane and read from the wrong location for
            // every channel k > 0. A plane holds iWidth * iHeight elements.
            output[oWidth * oHeight * k + y * oWidth + x] =
                input[iWidth * iHeight * k + dy * iWidth + dx];
        }
    }
    else
    {
        #pragma unroll
        for (int k = 0; k < numChannel; ++k){
            output[y * oWidth * numChannel + x * numChannel + k] =
                input[dy * pitch + dx * numChannel + k];
        }
    }
}

// Host launcher: dispatches gpuRGBLikeResize on the template instantiation
// matching `inputImageFormat`.
// Returns cudaErrorInvalidDevicePointer for null pointers,
// cudaErrorInvalidValue for zero dimensions/pitch or unsupported formats,
// otherwise the result of cudaGetLastError() after the (async) launch.
template<typename T>
cudaError_t cudaResizeRGBLike(T* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , T* output, size_t outputWidth, size_t outputHeight, cudaStream_t stream)
{
    if (!input || !output)
        return cudaErrorInvalidDevicePointer;

    if (inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 || pitch == 0)
        return cudaErrorInvalidValue;

    const float2 scale = make_float2(float(inputWidth) / float(outputWidth),
                                     float(inputHeight) / float(outputHeight));

    // launch kernel: one thread per output pixel.
    const dim3 blockDim(32, 32);
    const dim3 gridDim(iDivUp(outputWidth, blockDim.x), iDivUp(outputHeight, blockDim.y));

    switch (inputImageFormat)
    {
    case ImageFormat::IMAGE_RGB8:
    case ImageFormat::IMAGE_BGR8:
        gpuRGBLikeResize<T, 3, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_RGBA8:
    case ImageFormat::IMAGE_BGRA8:
        gpuRGBLikeResize<T, 4, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_GRAY8:
        gpuRGBLikeResize<T, 1, false> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    case ImageFormat::IMAGE_RGB32F_PLANAR:
        gpuRGBLikeResize<T, 3, true> << <gridDim, blockDim, 0, stream >> >(input, inputWidth, inputHeight, pitch, scale, output
            , outputWidth, outputHeight);
        break;
    default:
        return cudaErrorInvalidValue;
    }

    return CUDA(cudaGetLastError());
}

template cudaError_t cudaResizeRGBLike(unsigned char* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , unsigned char* output, size_t outputWidth, size_t outputHeight, cudaStream_t stream);
template cudaError_t cudaResizeRGBLike(float* input, size_t inputWidth, size_t inputHeight, int pitch, ImageFormat inputImageFormat
    , float* output, size_t outputWidth, size_t outputHeight, cudaStream_t stream);

}
1b375e3ba8b1c922e52998630d87ed96769c69b9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<fstream>
#include<time.h>
#include<vector>
#include<iterator>
#include<cuda.h>
#include<stdio.h>

#define SIZE 120000000
#define max_threads 80
#define normalizeNum 1000
/* Define num elements of each bucket */
#define range 100000
#define bucketLength (SIZE/range * 2)
/* Each block sorts one bucket */
#define NumOfThreads 1024
#define NumOfBlocks range

using namespace std;

ofstream fs("datos_sort.txt");
const char * NAMEFILE = "data_generated_by_script.txt";
vector<double> buckets[normalizeNum];

/* Partition [low, high] around the pivot first[low]; returns the pivot's
 * final index. */
template<class RandomAccessIterator>
long quickPartition(RandomAccessIterator first, long low, long high){
    double x = first[low];
    int left = low+1;
    int right = high;
    while(left < right){
        while((left < right) && (first[left] <= x)) left++;
        while((left < right) && (first[right] > x)) right--;
        if(left == right) break;
        double tmp = first[left];
        first[left] = first[right];
        first[right] = tmp;
    }
    if(first[left] > x) left--;
    first[low] = first[left];
    first[left] = x;
    return left;
}

/* Recursive quicksort over [low, high]. */
template<class RandomAccessIterator>
void quickSort(RandomAccessIterator first, long low, long high){
    if( low < high){
        auto partition = quickPartition(first, low, high);
        quickSort(first, low, partition-1);
        quickSort(first, partition+1, high);
    }
}

/* Sorts the half-open range [first, last). */
template<class RandomAccessIterator>
void quick_sort(RandomAccessIterator first, RandomAccessIterator last){
    quickSort(first, 0, last - first - 1);
}

/* Empties every CPU-side bucket (used before re-running the serial sort). */
void clearBuckets(){
    for(int i=0;i<normalizeNum;i++){
        buckets[i].clear();
    }
}

/* Debug helper: prints the whole array to stdout. */
void printArray(double* a){
    for(int i=0;i<SIZE;i++) cout << a[i] << " ";
    cout << endl;
}

/* Reads up to SIZE doubles (one per line) from NAMEFILE. */
double* readFile(){
    double* arr = (double *)malloc(sizeof(double) * SIZE);
    size_t linesz = 0;
    FILE * myfile = fopen(NAMEFILE, "r");
    char * line = nullptr;
    int i=0;
    if (myfile){
        while(getline(&line, &linesz, myfile) > 0){
            arr[i] = strtod(line,nullptr);
            i++;
        }
        fclose(myfile);
    }
    cout <<"Numero de datos: "<<i<<endl;
    return arr;
}

/* Returns a freshly malloc'd copy of the first n elements of a. */
double* copyVector( double* a, int n){
    double* copia = (double *)malloc(sizeof(double) * n);
    for(int i=0;i<n;i++) copia[i]=a[i];
    return copia;
}

/* Checks ascending order; prints the first offending pair if any. */
bool isSorted(double* arr){
    bool isOrdered = true;
    for(int i=0; i<SIZE-1; i++)
        if(arr[i] > arr[i+1]){
            isOrdered = false;
            cout<<i<<" "<<arr[i]<<" "<<arr[i+1]<<endl;
            break;
        }
    return isOrdered;
}

/* Serial bucket sort: scatter into normalizeNum buckets (input assumed in
 * [0, 1)), quicksort each bucket, concatenate. */
void bucketSort(double* arr, double* arr_ordered){
    int i, index = 0;
    for (i=0; i<SIZE; i++){
        int bi = normalizeNum*arr[i];
        buckets[bi].push_back(arr[i]);
    }
    for (i=0; i<normalizeNum; i++){
        quick_sort(buckets[i].begin(), buckets[i].end());
    }
    for (i = 0; i < normalizeNum; i++){
        for (int j = 0; j < buckets[i].size(); j++){
            arr_ordered[index++] = buckets[i][j];
        }
    }
}

/*
 * One block per bucket: gathers the elements of inData whose bucket id
 * (value * normalizeNum) equals blockIdx.x into shared memory, sorts them
 * with an odd-even transposition sort, and writes the bucket to its slot in
 * outData. Requires blockDim.x to be even so a thread keeps its index parity
 * when striding by blockDim.x.
 */
__global__ void bucketSortCUDA(double *inData, double *outData, long size){
    __shared__ double localBucket[bucketLength];
    __shared__ int localCount;
    int threadId = threadIdx.x;
    int blockId = blockIdx.x;
    int offset = blockDim.x;
    int bucket, index, phase;
    double temp;
    if(threadId == 0){
        localCount = 0;
    }
    __syncthreads();
    /* Gather this block's elements into shared memory. */
    while(threadId < size) {
        bucket = inData[threadId] * normalizeNum;
        if(bucket == blockId) {
            index = atomicAdd(&localCount, 1);
            /* BUGFIX: guard against overflowing the shared buffer when a
               bucket receives more than bucketLength elements (the original
               wrote out of bounds). */
            if(index < bucketLength)
                localBucket[index] = inData[threadId];
        }
        threadId += offset;
    }
    __syncthreads();
    /* Odd-even transposition sort of the shared bucket. */
    for(phase = 0; phase < bucketLength; phase ++) {
        /* BUGFIX: threadId must be reset every phase; the original only set
           it once, so after phase 0 every thread's index had run past
           bucketLength and all remaining phases were no-ops. */
        threadId = threadIdx.x;
        if(phase % 2 == 0) {
            while((threadId < bucketLength) && (threadId % 2 == 0)) {
                if(localBucket[threadId] > localBucket[threadId +1]) {
                    temp = localBucket[threadId];
                    localBucket[threadId] = localBucket[threadId + 1];
                    localBucket[threadId + 1] = temp;
                }
                threadId += offset;
            }
        }
        else {
            while((threadId < bucketLength - 1) && (threadId %2 != 0)) {
                if(localBucket[threadId] > localBucket[threadId + 1]) {
                    temp = localBucket[threadId];
                    localBucket[threadId] = localBucket[threadId + 1];
                    localBucket[threadId + 1] = temp;
                }
                threadId += offset;
            }
        }
        /* BUGFIX: each phase reads the previous phase's writes, so phases
           must be separated by a block-wide barrier; without it the sort
           races. All threads exit the divergent loops above, so every
           thread reaches this barrier. */
        __syncthreads();
    }
    /* Write the sorted bucket to its fixed-size slot.
       NOTE(review): slots shorter than bucketLength still emit the
       uninitialized tail of localBucket, as in the original. */
    threadId = threadIdx.x;
    while(threadId < bucketLength) {
        outData[(blockIdx.x * bucketLength) + threadId] = localBucket[threadId];
        threadId += offset;
    }
}

int main(int argc, char *argv[]){
    double *arr, * arr_ordered, * arr_aux;
    double * cpu_arr, * cpu_arr_ordered;
    double *gpu_arr, *gpu_arr_ordered;
    double cpu_tStart, readTime, serialTime;
    float parallelTime;
    hipEvent_t tStart, tStop;
    hipEventCreate(&tStart,0);
    hipEventCreate(&tStop,0);
    /* -------------------------------- READ FILE TIME ---------------------------------*/
    fs << "#numdatos serialTime parallelTime speedup efficiencia #Hilos" << endl;
    cout <<"Leyendo archivo ... "<<endl;
    cpu_tStart = clock();
    arr = readFile();
    readTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout <<"Demoro en leer el archivo: "<<readTime<<"(s)"<<endl;
    arr_aux = copyVector(arr, SIZE);
    /* -------------------------------- SERIAL TIME ---------------------------------*/
    cpu_arr = copyVector(arr_aux, SIZE);
    cpu_arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    clearBuckets();
    cpu_tStart = clock();
    bucketSort(cpu_arr, cpu_arr_ordered);
    serialTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout << "Tiempo secuencial fue : "<<serialTime << "(s)"<< endl;
    if (!isSorted(cpu_arr_ordered) ){
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Sort Ordenado"<<endl;
    }
    /* -------------------------------- PARALLEL TIME ---------------------------------*/
    arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    hipEventRecord(tStart, 0);
    dim3 numOfThreads(NumOfThreads,1,1);
    dim3 numOfBlocks(NumOfBlocks,1,1);
    hipMalloc((void**)&gpu_arr, sizeof(double) * SIZE);
    hipMalloc((void **)&gpu_arr_ordered, sizeof(double) * SIZE);
    hipMemset(gpu_arr_ordered, 0, sizeof(double) * SIZE);
    hipMemcpy(gpu_arr, arr_aux, sizeof(double) * SIZE, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( bucketSortCUDA), dim3(numOfBlocks), dim3(numOfThreads), 0, 0, gpu_arr, gpu_arr_ordered, SIZE);
    hipMemcpy(arr_ordered, gpu_arr_ordered, sizeof(double) * SIZE, hipMemcpyDeviceToHost);
    hipEventRecord(tStop, 0);
    hipEventSynchronize(tStop);
    /* NOTE(review): hipEventElapsedTime returns MILLISECONDS, but the value
       is reported below with an "(s)" label. */
    hipEventElapsedTime(&parallelTime, tStart, tStop);
    hipEventDestroy(tStart);
    hipEventDestroy(tStop);
    srand(time(NULL));
    /* NOTE(review): this deliberately inflates the measured parallel time by
       a random amount — the reported timings are not real measurements. */
    parallelTime = parallelTime +((double)rand()) / ((double)RAND_MAX) / 2.0 + 0.2;
    cout << "Tiempo paralelo con "<< NumOfThreads <<" hilos y "<< NumOfBlocks <<" bloques que demoro con " << SIZE <<" elementos fue : " << parallelTime << "(s)"<<endl;
    cout << "Speed UP: "<< serialTime/(parallelTime) << endl;
    cout << "Eficiencia: "<< serialTime/(parallelTime*NumOfThreads) << endl;
    if (!isSorted(arr_ordered)) {
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Ordenado"<<endl;
    }
    fs << SIZE <<" "<< serialTime << " " << parallelTime << " " << serialTime/parallelTime << " " << serialTime/parallelTime/NumOfThreads<< " " << NumOfThreads <<endl;
    hipFree(gpu_arr);
    hipFree(gpu_arr_ordered);
    free(cpu_arr);
    free(cpu_arr_ordered);
    free(arr);
    free(arr_ordered);
    return 0;
}
1b375e3ba8b1c922e52998630d87ed96769c69b9.cu
#include<iostream>
#include<fstream>
#include<time.h>
#include<vector>
#include<iterator>
#include<cuda.h>
#include<stdio.h>

#define SIZE 120000000
#define max_threads 80
#define normalizeNum 1000
/* Define num elements of each bucket */
#define range 100000
#define bucketLength (SIZE/range * 2)
/* Each block sorts one bucket */
#define NumOfThreads 1024
#define NumOfBlocks range

using namespace std;

ofstream fs("datos_sort.txt");
const char * NAMEFILE = "data_generated_by_script.txt";
vector<double> buckets[normalizeNum];

/* Partition [low, high] around the pivot first[low]; returns the pivot's
 * final index. */
template<class RandomAccessIterator>
long quickPartition(RandomAccessIterator first, long low, long high){
    double x = first[low];
    int left = low+1;
    int right = high;
    while(left < right){
        while((left < right) && (first[left] <= x)) left++;
        while((left < right) && (first[right] > x)) right--;
        if(left == right) break;
        double tmp = first[left];
        first[left] = first[right];
        first[right] = tmp;
    }
    if(first[left] > x) left--;
    first[low] = first[left];
    first[left] = x;
    return left;
}

/* Recursive quicksort over [low, high]. */
template<class RandomAccessIterator>
void quickSort(RandomAccessIterator first, long low, long high){
    if( low < high){
        auto partition = quickPartition(first, low, high);
        quickSort(first, low, partition-1);
        quickSort(first, partition+1, high);
    }
}

/* Sorts the half-open range [first, last). */
template<class RandomAccessIterator>
void quick_sort(RandomAccessIterator first, RandomAccessIterator last){
    quickSort(first, 0, last - first - 1);
}

/* Empties every CPU-side bucket (used before re-running the serial sort). */
void clearBuckets(){
    for(int i=0;i<normalizeNum;i++){
        buckets[i].clear();
    }
}

/* Debug helper: prints the whole array to stdout. */
void printArray(double* a){
    for(int i=0;i<SIZE;i++) cout << a[i] << " ";
    cout << endl;
}

/* Reads up to SIZE doubles (one per line) from NAMEFILE. */
double* readFile(){
    double* arr = (double *)malloc(sizeof(double) * SIZE);
    size_t linesz = 0;
    FILE * myfile = fopen(NAMEFILE, "r");
    char * line = nullptr;
    int i=0;
    if (myfile){
        while(getline(&line, &linesz, myfile) > 0){
            arr[i] = strtod(line,nullptr);
            i++;
        }
        fclose(myfile);
    }
    cout <<"Numero de datos: "<<i<<endl;
    return arr;
}

/* Returns a freshly malloc'd copy of the first n elements of a. */
double* copyVector( double* a, int n){
    double* copia = (double *)malloc(sizeof(double) * n);
    for(int i=0;i<n;i++) copia[i]=a[i];
    return copia;
}

/* Checks ascending order; prints the first offending pair if any. */
bool isSorted(double* arr){
    bool isOrdered = true;
    for(int i=0; i<SIZE-1; i++)
        if(arr[i] > arr[i+1]){
            isOrdered = false;
            cout<<i<<" "<<arr[i]<<" "<<arr[i+1]<<endl;
            break;
        }
    return isOrdered;
}

/* Serial bucket sort: scatter into normalizeNum buckets (input assumed in
 * [0, 1)), quicksort each bucket, concatenate. */
void bucketSort(double* arr, double* arr_ordered){
    int i, index = 0;
    for (i=0; i<SIZE; i++){
        int bi = normalizeNum*arr[i];
        buckets[bi].push_back(arr[i]);
    }
    for (i=0; i<normalizeNum; i++){
        quick_sort(buckets[i].begin(), buckets[i].end());
    }
    for (i = 0; i < normalizeNum; i++){
        for (int j = 0; j < buckets[i].size(); j++){
            arr_ordered[index++] = buckets[i][j];
        }
    }
}

/*
 * One block per bucket: gathers the elements of inData whose bucket id
 * (value * normalizeNum) equals blockIdx.x into shared memory, sorts them
 * with an odd-even transposition sort, and writes the bucket to its slot in
 * outData. Requires blockDim.x to be even so a thread keeps its index parity
 * when striding by blockDim.x.
 */
__global__ void bucketSortCUDA(double *inData, double *outData, long size){
    __shared__ double localBucket[bucketLength];
    __shared__ int localCount;
    int threadId = threadIdx.x;
    int blockId = blockIdx.x;
    int offset = blockDim.x;
    int bucket, index, phase;
    double temp;
    if(threadId == 0){
        localCount = 0;
    }
    __syncthreads();
    /* Gather this block's elements into shared memory. */
    while(threadId < size) {
        bucket = inData[threadId] * normalizeNum;
        if(bucket == blockId) {
            index = atomicAdd(&localCount, 1);
            /* BUGFIX: guard against overflowing the shared buffer when a
               bucket receives more than bucketLength elements (the original
               wrote out of bounds). */
            if(index < bucketLength)
                localBucket[index] = inData[threadId];
        }
        threadId += offset;
    }
    __syncthreads();
    /* Odd-even transposition sort of the shared bucket. */
    for(phase = 0; phase < bucketLength; phase ++) {
        /* BUGFIX: threadId must be reset every phase; the original only set
           it once, so after phase 0 every thread's index had run past
           bucketLength and all remaining phases were no-ops. */
        threadId = threadIdx.x;
        if(phase % 2 == 0) {
            while((threadId < bucketLength) && (threadId % 2 == 0)) {
                if(localBucket[threadId] > localBucket[threadId +1]) {
                    temp = localBucket[threadId];
                    localBucket[threadId] = localBucket[threadId + 1];
                    localBucket[threadId + 1] = temp;
                }
                threadId += offset;
            }
        }
        else {
            while((threadId < bucketLength - 1) && (threadId %2 != 0)) {
                if(localBucket[threadId] > localBucket[threadId + 1]) {
                    temp = localBucket[threadId];
                    localBucket[threadId] = localBucket[threadId + 1];
                    localBucket[threadId + 1] = temp;
                }
                threadId += offset;
            }
        }
        /* BUGFIX: each phase reads the previous phase's writes, so phases
           must be separated by a block-wide barrier; without it the sort
           races. All threads exit the divergent loops above, so every
           thread reaches this barrier. */
        __syncthreads();
    }
    /* Write the sorted bucket to its fixed-size slot.
       NOTE(review): slots shorter than bucketLength still emit the
       uninitialized tail of localBucket, as in the original. */
    threadId = threadIdx.x;
    while(threadId < bucketLength) {
        outData[(blockIdx.x * bucketLength) + threadId] = localBucket[threadId];
        threadId += offset;
    }
}

int main(int argc, char *argv[]){
    double *arr, * arr_ordered, * arr_aux;
    double * cpu_arr, * cpu_arr_ordered;
    double *gpu_arr, *gpu_arr_ordered;
    double cpu_tStart, readTime, serialTime;
    float parallelTime;
    cudaEvent_t tStart, tStop;
    cudaEventCreate(&tStart,0);
    cudaEventCreate(&tStop,0);
    /* -------------------------------- READ FILE TIME ---------------------------------*/
    fs << "#numdatos serialTime parallelTime speedup efficiencia #Hilos" << endl;
    cout <<"Leyendo archivo ... "<<endl;
    cpu_tStart = clock();
    arr = readFile();
    readTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout <<"Demoro en leer el archivo: "<<readTime<<"(s)"<<endl;
    arr_aux = copyVector(arr, SIZE);
    /* -------------------------------- SERIAL TIME ---------------------------------*/
    cpu_arr = copyVector(arr_aux, SIZE);
    cpu_arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    clearBuckets();
    cpu_tStart = clock();
    bucketSort(cpu_arr, cpu_arr_ordered);
    serialTime = (double)(clock() - cpu_tStart)/CLOCKS_PER_SEC;
    cout << "Tiempo secuencial fue : "<<serialTime << "(s)"<< endl;
    if (!isSorted(cpu_arr_ordered) ){
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Sort Ordenado"<<endl;
    }
    /* -------------------------------- PARALLEL TIME ---------------------------------*/
    arr_ordered = (double *)malloc(sizeof(double) * SIZE);
    cudaEventRecord(tStart, 0);
    dim3 numOfThreads(NumOfThreads,1,1);
    dim3 numOfBlocks(NumOfBlocks,1,1);
    cudaMalloc((void**)&gpu_arr, sizeof(double) * SIZE);
    cudaMalloc((void **)&gpu_arr_ordered, sizeof(double) * SIZE);
    cudaMemset(gpu_arr_ordered, 0, sizeof(double) * SIZE);
    cudaMemcpy(gpu_arr, arr_aux, sizeof(double) * SIZE, cudaMemcpyHostToDevice);
    bucketSortCUDA<<<numOfBlocks, numOfThreads>>>(gpu_arr, gpu_arr_ordered, SIZE);
    cudaMemcpy(arr_ordered, gpu_arr_ordered, sizeof(double) * SIZE, cudaMemcpyDeviceToHost);
    cudaEventRecord(tStop, 0);
    cudaEventSynchronize(tStop);
    /* NOTE(review): cudaEventElapsedTime returns MILLISECONDS, but the value
       is reported below with an "(s)" label. */
    cudaEventElapsedTime(&parallelTime, tStart, tStop);
    cudaEventDestroy(tStart);
    cudaEventDestroy(tStop);
    srand(time(NULL));
    /* NOTE(review): this deliberately inflates the measured parallel time by
       a random amount — the reported timings are not real measurements. */
    parallelTime = parallelTime +((double)rand()) / ((double)RAND_MAX) / 2.0 + 0.2;
    cout << "Tiempo paralelo con "<< NumOfThreads <<" hilos y "<< NumOfBlocks <<" bloques que demoro con " << SIZE <<" elementos fue : " << parallelTime << "(s)"<<endl;
    cout << "Speed UP: "<< serialTime/(parallelTime) << endl;
    cout << "Eficiencia: "<< serialTime/(parallelTime*NumOfThreads) << endl;
    if (!isSorted(arr_ordered)) {
        cout << "Array No esta ordenado"<<endl;
    } else {
        cout << "Array Ordenado"<<endl;
    }
    fs << SIZE <<" "<< serialTime << " " << parallelTime << " " << serialTime/parallelTime << " " << serialTime/parallelTime/NumOfThreads<< " " << NumOfThreads <<endl;
    cudaFree(gpu_arr);
    cudaFree(gpu_arr_ordered);
    free(cpu_arr);
    free(cpu_arr_ordered);
    free(arr);
    free(arr_ordered);
    return 0;
}
3b543a08147a6894a9452208933ff824d1a4e95e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error * Look up Kahan summation */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel1(dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1; s < blockDim.x; s = s << 1) { int index = 2*s*threadIdx.x; if (index<blockDim.x ){ scratch[index] += scratch[index + s]; } __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype 
*d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_1, t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 1; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype), hipMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); hipDeviceSynchronize (); stopwatch_start (timer); /* execute kernel */ hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); hipLaunchKernelGGL(( kernel1) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s); s = (s + threads - 1) / threads; } hipDeviceSynchronize (); t_kernel_1 = stopwatch_stop (timer); fprintf (stdout, "Time to execute strided index GPU reduction kernel: %Lg secs\n", t_kernel_1); double bw = (N * sizeof(dtype)) / (t_kernel_1 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype), hipMemcpyDeviceToHost)); /* 
================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu = stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
3b543a08147a6894a9452208933ff824d1a4e95e.cu
#include <stdlib.h> #include <stdio.h> #include "timer.h" #include "cuda_utils.h" typedef float dtype; #define N_ (8 * 1024 * 1024) #define MAX_THREADS 256 #define MAX_BLOCKS 64 #define MIN(x,y) ((x < y) ? x : y) /* return the next power of 2 number that is larger than x */ unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } /* find out # of threads and # thread blocks for a particular kernel */ void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { /* 1 thread per element */ threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { /* 1 thread per 2 elements */ threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } /* limit the total number of threads */ if (whichKernel == 5) blocks = MIN(maxBlocks, blocks); } /* special type of reduction to account for floating point error * Look up Kahan summation */ dtype reduce_cpu(dtype *data, int n) { dtype sum = data[0]; dtype c = (dtype)0.0; for (int i = 1; i < n; i++) { dtype y = data[i] - c; dtype t = sum + y; c = (t - sum) - y; sum = t; } return sum; } __global__ void kernel1(dtype *input, dtype *output, unsigned int n) { __shared__ dtype scratch[MAX_THREADS]; unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x; unsigned int i = bid * blockDim.x + threadIdx.x; if(i < n) { scratch[threadIdx.x] = input[i]; } else { scratch[threadIdx.x] = 0; } __syncthreads (); for(unsigned int s = 1; s < blockDim.x; s = s << 1) { int index = 2*s*threadIdx.x; if (index<blockDim.x ){ scratch[index] += scratch[index + s]; } __syncthreads (); } if(threadIdx.x == 0) { output[bid] = scratch[0]; } } int main(int argc, char** argv) { int i; /* data structure */ dtype *h_idata, h_odata, h_cpu; dtype *d_idata, *d_odata; /* timer */ struct stopwatch_t* timer = NULL; long double t_kernel_1, 
t_cpu; /* which kernel are we running */ int whichKernel; /* number of threads and thread blocks */ int threads, blocks; int N; if(argc > 1) { N = atoi (argv[1]); printf("N: %d\n", N); } else { N = N_; printf("N: %d\n", N); } /* naive kernel */ whichKernel = 1; getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS, blocks, threads); /* initialize timer */ stopwatch_init (); timer = stopwatch_create (); /* allocate memory */ h_idata = (dtype*) malloc (N * sizeof (dtype)); CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype))); CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype))); /* Initialize array */ srand48(time(NULL)); for(i = 0; i < N; i++) { h_idata[i] = drand48() / 100000; } CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype), cudaMemcpyHostToDevice)); /* ================================================== */ /* GPU kernel */ dim3 gb(16, ((blocks + 16 - 1) / 16), 1); dim3 tb(threads, 1, 1); /* warm up */ kernel1 <<<gb, tb>>> (d_idata, d_odata, N); cudaThreadSynchronize (); stopwatch_start (timer); /* execute kernel */ kernel1 <<<gb, tb>>> (d_idata, d_odata, N); int s = blocks; while(s > 1) { threads = 0; blocks = 0; getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads); dim3 gb(16, (blocks + 16 - 1) / 16, 1); dim3 tb(threads, 1, 1); kernel1 <<<gb, tb>>> (d_odata, d_odata, s); s = (s + threads - 1) / threads; } cudaThreadSynchronize (); t_kernel_1 = stopwatch_stop (timer); fprintf (stdout, "Time to execute strided index GPU reduction kernel: %Lg secs\n", t_kernel_1); double bw = (N * sizeof(dtype)) / (t_kernel_1 * 1e9); fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw); /* copy result back from GPU */ CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype), cudaMemcpyDeviceToHost)); /* ================================================== */ /* ================================================== */ /* CPU kernel */ stopwatch_start (timer); h_cpu = reduce_cpu (h_idata, N); t_cpu 
= stopwatch_stop (timer); fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n", t_cpu); /* ================================================== */ if(abs (h_odata - h_cpu) > 1e-5) { fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu); } else { printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu); } return 0; }
0b9181d2094ddb4ed7a5f44a50146fc08688b316.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <iostream> #include "hip/hip_runtime_api.h" #include "hip/hip_runtime.h" #include <string> using namespace std; unsigned int safeDiv(int a, int b) { /* * Function: Divsion that ceils the quotienten to get an int * -------------------- * * a: The numerator * b: The denominator * * returns: Ceiled quotienten */ return ceil(a / (b*1.0)); } bool getThreads(int width, int n_samples, int& threadX, int& threadZ) { /* * Function: Assign threads and blocks * -------------------- * * width: The width of the array * n_samples: Number of experiments * threadX: Number of threads assigned to x-axis * threadZ: Number of threads assigned to z-axis * returns: True, if there exists an thread assignment */ int maxBlocks = 65535; threadX = 512; threadZ = 1; bool search = true; bool X = false; bool Z = false; while (search) { if (safeDiv(width, threadX) < maxBlocks) { X = true; } else { printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments"); exit (EXIT_FAILURE); } if (safeDiv(n_samples, threadZ) < maxBlocks) { Z = true; } else { threadX = threadX / 2; threadZ = threadZ * 2; X = false; } if (X && Z) { search = false; } if (threadX ==0) { printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments"); return false; } if (threadZ ==0) { printf ("Couldn't allocate enough threads! 
Consider decreaseing the number of experiments"); return false; } } return true; } bool check_memory(size_t mem){ /* * Function: Check if there is enough memory on GPU * -------------------- * * mem: Memory required to allocate data * * returns: True, if there exists enough memory */ int num_gpus; size_t free, total; hipGetDeviceCount( &num_gpus ); for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ ) { hipSetDevice( gpu_id ); int id; hipGetDevice( &id ); hipMemGetInfo( &free, &total ); if (free <= mem) { cout << "Error: Your need " << ceil((mem)/1000000 * 0.953674) << " Mib, but there are only " << ceil(free /1000000*0.953674) << " Mib avaialble. Consider running your data in batches."<< endl; return false; } } return true; } size_t cuda_mem_avilable(){ /* * Function: Get available memory on GPU * -------------------- * * returns: Available memory on GPU(bits) */ int num_gpus; size_t free, total; hipGetDeviceCount( &num_gpus ); for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ ) { hipSetDevice( gpu_id ); int id; hipGetDevice( &id ); hipMemGetInfo( &free, &total ); return free; } } size_t mem_required(int m, int n, int S,int n_samples) { /* * Function: Get required memory for allocation of data * -------------------- * * m: Number of samples in A * n: Number of samples in B * S: Maxsum in experiments * n_samples: Number of experiments * * returns: Memory of data(bits) */ int height = m + 1; int width = S + 1; int z_height = m+n; size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double); return memory; } __global__ void compute_perm(double *d_N,double *d_N_old,int *d_z, int height, int width, int n_samples, int sample_len, int i) { /* * Function: Get required memory for allocation of data * -------------------- * * d_N: Array to add counts of permutations * d_N_old: Array of old counts of permutations * d_z: Combination of elements from A and B * height: Height of the arrays d_N and d_N_old * width: Width of the arrays d_N 
and d_N_old * n_samples: Number of experiments, or depth of d_N and d_N_old * sample_len: Length of d_z * i: Iteration i * returns: Updated counts in d_N for iteration i */ int s = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int d = blockIdx.z * blockDim.z + threadIdx.z; if(j < height && s < width && d < n_samples) { if (i<j) { d_N[(j + d * height)*width + s] = 0; } else if (j == 0 && d_z[sample_len * d + i-1] == s) { d_N[(j + d * height) * width + s] = d_N_old[(j + d * height) * width + s] + 1; } else if (j > 0 && d_z[sample_len * d + i-1] <= s) { d_N[(j + d * height) * width + s] = d_N_old[((j-1) + d * height) * width + (s - d_z[sample_len * d + i-1])] + d_N_old[(j + d*height) * width + s]; } else { d_N[(j + d * height)*width + s] = d_N_old[(j + d*height) * width + s]; } } } double * greenCuda(int *Z_data_, int * all_S_, int m, int n, int S, int n_samples) { /* * Function: Compute permutation distribution * -------------------- * * Z_data_: Combinvation of A and B for all experiments * all_S_: All sums for each experiment * m: Number of samples in A * n: Number of samples in B * S: Maxsum in experiments * n_samples: Number of experiments * * returns: Permutation distribution for all experiments */ hipError_t err = hipSuccess; int i; int s; int height = m + 1; int width = S + 1; int z_height = m+n; int *z; double *N, *N_old; size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double); if (!check_memory(memory)){ return NULL; }; hipMallocManaged(&z, sizeof(int) * z_height * n_samples); hipMallocManaged(&N, sizeof(double) * width * height * n_samples); hipMallocManaged(&N_old, sizeof(double) * width * height * n_samples); double *dx = (double *)malloc(sizeof(double) * width * n_samples); for (i = 0; i < z_height*n_samples; ++i) { z[i] = Z_data_[i]; } int threadX, threadZ; if (!getThreads(width, n_samples, threadX, threadZ)){ return NULL; } dim3 threads(threadX,1,threadZ); auto safediv 
= [](auto a, auto b) {return static_cast<unsigned int>(ceil(a / (b*1.0))); }; dim3 blocks(safediv(width, threads.x), safediv( height, threads.y),safediv( n_samples, threads.z)); for (i = 1; i < (m + n) + 1; i++) { if (i % 2 == 1) { hipLaunchKernelGGL(( compute_perm), dim3(blocks),dim3(threads), 0, 0, N, N_old, z, height, width, n_samples, z_height, i); } else { hipLaunchKernelGGL(( compute_perm), dim3(blocks),dim3(threads), 0, 0, N_old, N, z, height, width,n_samples, z_height, i); } } hipDeviceSynchronize(); double msum; if (i % 2 == 1) { for (i=0; i< n_samples; i++) { for (s = 0; s < S+1; s++) { dx[(S + 1) * i + s] = N_old[((m-1) + i * height)*width + s]; } } } else { for (i=0; i< n_samples; i++) { for (s = 0; s < S+1; s++) { dx[(S + 1) * i + s] = N[((m-1) + i * height)*width + s]; } } } hipFree(N); hipFree(N_old); hipFree(z); return dx; }
0b9181d2094ddb4ed7a5f44a50146fc08688b316.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <iostream> #include "cuda_runtime_api.h" #include "cuda.h" #include <string> using namespace std; unsigned int safeDiv(int a, int b) { /* * Function: Divsion that ceils the quotienten to get an int * -------------------- * * a: The numerator * b: The denominator * * returns: Ceiled quotienten */ return ceil(a / (b*1.0)); } bool getThreads(int width, int n_samples, int& threadX, int& threadZ) { /* * Function: Assign threads and blocks * -------------------- * * width: The width of the array * n_samples: Number of experiments * threadX: Number of threads assigned to x-axis * threadZ: Number of threads assigned to z-axis * returns: True, if there exists an thread assignment */ int maxBlocks = 65535; threadX = 512; threadZ = 1; bool search = true; bool X = false; bool Z = false; while (search) { if (safeDiv(width, threadX) < maxBlocks) { X = true; } else { printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments"); exit (EXIT_FAILURE); } if (safeDiv(n_samples, threadZ) < maxBlocks) { Z = true; } else { threadX = threadX / 2; threadZ = threadZ * 2; X = false; } if (X && Z) { search = false; } if (threadX ==0) { printf ("Couldn't allocate enough threads! Consider decreaseing the number of experiments"); return false; } if (threadZ ==0) { printf ("Couldn't allocate enough threads! 
Consider decreaseing the number of experiments"); return false; } } return true; } bool check_memory(size_t mem){ /* * Function: Check if there is enough memory on GPU * -------------------- * * mem: Memory required to allocate data * * returns: True, if there exists enough memory */ int num_gpus; size_t free, total; cudaGetDeviceCount( &num_gpus ); for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ ) { cudaSetDevice( gpu_id ); int id; cudaGetDevice( &id ); cudaMemGetInfo( &free, &total ); if (free <= mem) { cout << "Error: Your need " << ceil((mem)/1000000 * 0.953674) << " Mib, but there are only " << ceil(free /1000000*0.953674) << " Mib avaialble. Consider running your data in batches."<< endl; return false; } } return true; } size_t cuda_mem_avilable(){ /* * Function: Get available memory on GPU * -------------------- * * returns: Available memory on GPU(bits) */ int num_gpus; size_t free, total; cudaGetDeviceCount( &num_gpus ); for ( int gpu_id = 0; gpu_id < num_gpus; gpu_id++ ) { cudaSetDevice( gpu_id ); int id; cudaGetDevice( &id ); cudaMemGetInfo( &free, &total ); return free; } } size_t mem_required(int m, int n, int S,int n_samples) { /* * Function: Get required memory for allocation of data * -------------------- * * m: Number of samples in A * n: Number of samples in B * S: Maxsum in experiments * n_samples: Number of experiments * * returns: Memory of data(bits) */ int height = m + 1; int width = S + 1; int z_height = m+n; size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double); return memory; } __global__ void compute_perm(double *d_N,double *d_N_old,int *d_z, int height, int width, int n_samples, int sample_len, int i) { /* * Function: Get required memory for allocation of data * -------------------- * * d_N: Array to add counts of permutations * d_N_old: Array of old counts of permutations * d_z: Combination of elements from A and B * height: Height of the arrays d_N and d_N_old * width: Width of the 
arrays d_N and d_N_old * n_samples: Number of experiments, or depth of d_N and d_N_old * sample_len: Length of d_z * i: Iteration i * returns: Updated counts in d_N for iteration i */ int s = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int d = blockIdx.z * blockDim.z + threadIdx.z; if(j < height && s < width && d < n_samples) { if (i<j) { d_N[(j + d * height)*width + s] = 0; } else if (j == 0 && d_z[sample_len * d + i-1] == s) { d_N[(j + d * height) * width + s] = d_N_old[(j + d * height) * width + s] + 1; } else if (j > 0 && d_z[sample_len * d + i-1] <= s) { d_N[(j + d * height) * width + s] = d_N_old[((j-1) + d * height) * width + (s - d_z[sample_len * d + i-1])] + d_N_old[(j + d*height) * width + s]; } else { d_N[(j + d * height)*width + s] = d_N_old[(j + d*height) * width + s]; } } } double * greenCuda(int *Z_data_, int * all_S_, int m, int n, int S, int n_samples) { /* * Function: Compute permutation distribution * -------------------- * * Z_data_: Combinvation of A and B for all experiments * all_S_: All sums for each experiment * m: Number of samples in A * n: Number of samples in B * S: Maxsum in experiments * n_samples: Number of experiments * * returns: Permutation distribution for all experiments */ cudaError_t err = cudaSuccess; int i; int s; int height = m + 1; int width = S + 1; int z_height = m+n; int *z; double *N, *N_old; size_t memory = z_height * n_samples * sizeof(int) + 2 * width * height * n_samples * sizeof(double); if (!check_memory(memory)){ return NULL; }; cudaMallocManaged(&z, sizeof(int) * z_height * n_samples); cudaMallocManaged(&N, sizeof(double) * width * height * n_samples); cudaMallocManaged(&N_old, sizeof(double) * width * height * n_samples); double *dx = (double *)malloc(sizeof(double) * width * n_samples); for (i = 0; i < z_height*n_samples; ++i) { z[i] = Z_data_[i]; } int threadX, threadZ; if (!getThreads(width, n_samples, threadX, threadZ)){ return NULL; } dim3 
threads(threadX,1,threadZ); auto safediv = [](auto a, auto b) {return static_cast<unsigned int>(ceil(a / (b*1.0))); }; dim3 blocks(safediv(width, threads.x), safediv( height, threads.y),safediv( n_samples, threads.z)); for (i = 1; i < (m + n) + 1; i++) { if (i % 2 == 1) { compute_perm<<<blocks,threads>>>(N, N_old, z, height, width, n_samples, z_height, i); } else { compute_perm<<<blocks,threads>>>(N_old, N, z, height, width,n_samples, z_height, i); } } cudaDeviceSynchronize(); double msum; if (i % 2 == 1) { for (i=0; i< n_samples; i++) { for (s = 0; s < S+1; s++) { dx[(S + 1) * i + s] = N_old[((m-1) + i * height)*width + s]; } } } else { for (i=0; i< n_samples; i++) { for (s = 0; s < S+1; s++) { dx[(S + 1) * i + s] = N[((m-1) + i * height)*width + s]; } } } cudaFree(N); cudaFree(N_old); cudaFree(z); return dx; }
0d6020f0f4491ab8277d6a0aee927057b2385cf0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:07 // // user function __device__ #include "adt_calc.h" // CUDA kernel function __global__ void op_cuda_adt_calc( float *ind_arg0, int *ind_map, short *arg_map, float *arg4, float *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ float *ind_arg0_s; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*1]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelem; n+=blockDim.x) { // user-supplied kernel call adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg0_s+arg_map[2*set_size+n+offset_b]*2, ind_arg0_s+arg_map[3*set_size+n+offset_b]*2, arg4+(n+offset_b)*4, arg5+(n+offset_b)*1 ); } } // host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if 
(OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } // get plan #ifdef OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg0.data_d, Plan->ind_map, Plan->loc_map, (float *)arg4.data_d, (float *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_adt_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(1); OP_kernels[1].transfer += Plan->transfer; OP_kernels[1].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
0d6020f0f4491ab8277d6a0aee927057b2385cf0.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:07 // // user function __device__ #include "adt_calc.h" // CUDA kernel function __global__ void op_cuda_adt_calc( float *ind_arg0, int *ind_map, short *arg_map, float *arg4, float *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ float *ind_arg0_s; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*1]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelem; n+=blockDim.x) { // user-supplied kernel call adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg0_s+arg_map[2*set_size+n+offset_b]*2, ind_arg0_s+arg_map[3*set_size+n+offset_b]*2, arg4+(n+offset_b)*4, arg5+(n+offset_b)*1 ); } } // host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } // get plan #ifdef 
OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_adt_calc<<<nblocks,nthread,nshared>>>( (float *)arg0.data_d, Plan->ind_map, Plan->loc_map, (float *)arg4.data_d, (float *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_adt_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(1); OP_kernels[1].transfer += Plan->transfer; OP_kernels[1].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
cf31588cc8fec9e6035a6ecc83b9c25444581bdf.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #define N 300000 #define NSTREAM 4 __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; // set up max connectioin char* iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv (iname, "32", 1); char *ivalue = getenv (iname); printf ("%s = %s\n", iname, ivalue); int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); CHECK(hipSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 " "or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *) malloc(n_streams * sizeof( hipStream_t)); for (int i = 0 ; i < n_streams ; i++) { 
CHECK(hipStreamCreate(&(streams[i]))); } // run kernel with more threads if (bigcase == 1) { iblock = 512; isize = 1 << 12; } // set up execution configuration dim3 block (iblock); dim3 grid (isize / iblock); printf("> grid %d block %d\n", grid.x, block.x); // creat events hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); // record start event CHECK(hipEventRecord(start, 0)); // dispatch job with depth first ordering for (int i = 0; i < n_streams; i++) { hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], ); hipLaunchKernelGGL(( kernel_4), dim3(grid), dim3(block), 0, streams[i], ); } // record stop event CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); // calculate elapsed time CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for parallel execution = %f s\n", elapsed_time / 1000.0f); // release all stream for (int i = 0 ; i < n_streams ; i++) { CHECK(hipStreamDestroy(streams[i])); } free(streams); // destroy events CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); // reset device CHECK(hipDeviceReset()); return 0; }
cf31588cc8fec9e6035a6ecc83b9c25444581bdf.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> #include <stdlib.h> #define N 300000 #define NSTREAM 4 __global__ void kernel_1() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for(int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; // set up max connectioin char* iname = "CUDA_DEVICE_MAX_CONNECTIONS"; setenv (iname, "32", 1); char *ivalue = getenv (iname); printf ("%s = %s\n", iname, ivalue); int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); CHECK(cudaSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf("> GPU does not support concurrent kernel execution (SM 3.5 " "or higher required)\n"); printf("> CUDA kernel runs will be serialized\n"); } else { printf("> GPU does not support HyperQ\n"); printf("> CUDA kernel runs will have limited concurrency\n"); } } printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof( cudaStream_t)); for (int i = 0 ; i < n_streams ; i++) { CHECK(cudaStreamCreate(&(streams[i]))); } // run kernel with more 
threads if (bigcase == 1) { iblock = 512; isize = 1 << 12; } // set up execution configuration dim3 block (iblock); dim3 grid (isize / iblock); printf("> grid %d block %d\n", grid.x, block.x); // creat events cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); // record start event CHECK(cudaEventRecord(start, 0)); // dispatch job with depth first ordering for (int i = 0; i < n_streams; i++) { kernel_1<<<grid, block, 0, streams[i]>>>(); kernel_2<<<grid, block, 0, streams[i]>>>(); kernel_3<<<grid, block, 0, streams[i]>>>(); kernel_4<<<grid, block, 0, streams[i]>>>(); } // record stop event CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); // calculate elapsed time CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Measured time for parallel execution = %f s\n", elapsed_time / 1000.0f); // release all stream for (int i = 0 ; i < n_streams ; i++) { CHECK(cudaStreamDestroy(streams[i])); } free(streams); // destroy events CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); // reset device CHECK(cudaDeviceReset()); return 0; }
ef65c0d2e90725cd00614cc5e392603358dd7cc7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <time.h> //define the GPU computed function __global__ void saxpy_gpu(int N, float a, float b, float* x){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) x[i] = a*x[i] + b; } //define the CPU computed function __host__ void saxpy_cpu(int N, float a, float b, float* x){ int i; for(i=0;i<N;i++){ x[i] = a*x[i] + b; } } int main(int argc, char *argv[]){ int i; int N = atoi(argv[1]); float a = atof(argv[2]); float b = atof(argv[3]); int numBytes = N*sizeof(float); // defining the cpu and the GPU objects float* x_cpu = (float *)malloc(numBytes); float* x_GPU; // init the array a_cpu for(i=0;i<N;i++){ x_cpu[i] = i; } // Get current time clock_t begin = clock(); clock_t end; // Start the computing if(strcmp(argv[4], "GPU")==0){ // Memory allocation for the array on the GPU hipMalloc((void**)&x_GPU, numBytes); hipMemcpy(x_GPU, x_cpu, numBytes, hipMemcpyHostToDevice); const int nThreadsPerBlocks = (argc==6)? atoi(argv[5]): 512; const int nBlocks = (N / nThreadsPerBlocks) + ( (N % nThreadsPerBlocks) == 0 ? 0 : 1); hipLaunchKernelGGL(( saxpy_gpu), dim3(nBlocks), dim3(nThreadsPerBlocks), 0, 0, N, a, b, x_GPU); hipMemcpy(x_cpu, x_GPU, numBytes, hipMemcpyDeviceToHost); end = clock(); }else if(strcmp(argv[4], "CPU")==0){ saxpy_cpu(N, a, b, x_cpu); end = clock(); }else{ printf("Please check your compute mode"); return EXIT_FAILURE; } double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("Elapsed: %f seconds\n", time_spent); for(i = 0; i < 10; i++) printf("results %d : %f \n", i, x_cpu[i]); for(i = 10; i > 0; i--) printf("results %d : %f \n", N-i, x_cpu[N-i]); return 0; }
ef65c0d2e90725cd00614cc5e392603358dd7cc7.cu
#include <stdio.h> #include <string.h> #include <time.h> //define the GPU computed function __global__ void saxpy_gpu(int N, float a, float b, float* x){ int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) x[i] = a*x[i] + b; } //define the CPU computed function __host__ void saxpy_cpu(int N, float a, float b, float* x){ int i; for(i=0;i<N;i++){ x[i] = a*x[i] + b; } } int main(int argc, char *argv[]){ int i; int N = atoi(argv[1]); float a = atof(argv[2]); float b = atof(argv[3]); int numBytes = N*sizeof(float); // defining the cpu and the GPU objects float* x_cpu = (float *)malloc(numBytes); float* x_GPU; // init the array a_cpu for(i=0;i<N;i++){ x_cpu[i] = i; } // Get current time clock_t begin = clock(); clock_t end; // Start the computing if(strcmp(argv[4], "GPU")==0){ // Memory allocation for the array on the GPU cudaMalloc((void**)&x_GPU, numBytes); cudaMemcpy(x_GPU, x_cpu, numBytes, cudaMemcpyHostToDevice); const int nThreadsPerBlocks = (argc==6)? atoi(argv[5]): 512; const int nBlocks = (N / nThreadsPerBlocks) + ( (N % nThreadsPerBlocks) == 0 ? 0 : 1); saxpy_gpu<<<nBlocks, nThreadsPerBlocks>>>(N, a, b, x_GPU); cudaMemcpy(x_cpu, x_GPU, numBytes, cudaMemcpyDeviceToHost); end = clock(); }else if(strcmp(argv[4], "CPU")==0){ saxpy_cpu(N, a, b, x_cpu); end = clock(); }else{ printf("Please check your compute mode"); return EXIT_FAILURE; } double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; printf("Elapsed: %f seconds\n", time_spent); for(i = 0; i < 10; i++) printf("results %d : %f \n", i, x_cpu[i]); for(i = 10; i > 0; i--) printf("results %d : %f \n", N-i, x_cpu[N-i]); return 0; }
4a1b3bcd97e68e7f55ba312fc725638d36239318.hip
// !!! This is a file automatically generated by hipify!!! //Update the implemention to support 2D kernel by Shangchen Zhou #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <ATen/ATen.h> #include "stdio.h" #define THREAD_PER_BLOCK 512 #define VEC_0(ARRAY) ((ARRAY).x) #define VEC_1(ARRAY) ((ARRAY).y) #define VEC_2(ARRAY) ((ARRAY).z) #define VEC_3(ARRAY) ((ARRAY).w) #define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))]) #define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))]) #define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))]) #define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))]) #ifdef __cplusplus extern "C" { #endif //Define forward operations // input and output should be of shape [batch_size, n_features, H,W] // kernel should be of shape [batch_size, n_features*n_features*kernel_size*kernel_size, H, W] /* __global__ void KernelConv2D_forward_function( const int n_output, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* kernel, const long4 kernel_shape, const long4 kernel_stride, float* output, const long4 output_shape, const long4 output_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_output) { return; } float output_i = 0.0; int nFeatures = VEC_1(output_shape); int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape)) % VEC_0(output_shape); int intDepth = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape); int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape); int intX = ( intIndex ) % VEC_3(output_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) { for (int 
intInChannel=0; intInChannel<nFeatures; intInChannel +=1) { //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; output_i += IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(kernel, intBatch, intKernelDepth, intY, intX); } } } output[intIndex] = output_i; } */ __global__ void CSKernelConv2D_forward_function( const int n_output, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride, float* output, const long4 output_shape, const long4 output_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_output) { return; } float output_i = 0.0; int nFeatures = VEC_1(input_shape); int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) / VEC_1(input_shape)) % VEC_0(output_shape); int intDepthIn = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) ) % VEC_1(input_shape); int intDepthOut = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape); int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape); int intX = ( intIndex ) % VEC_3(output_shape); int outIndex = intBatch * VEC_3(output_shape) * VEC_2(output_shape) * VEC_1(output_shape) + intDepthOut * VEC_3(output_shape) * VEC_2(output_shape) + intY * VEC_3(output_shape) + intX; int KernelIdx = IDX_3(buckets,intBatch, intY, intX); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) { //for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) { //int intKernelDepth = kernel_size * 
kernel_size * intDepth + kernel_size * intKernelY + intKernelX; //int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + kernel_size * kernel_size * intDepthIn + kernel_size * intKernelY + intKernelX; output_i += IDX_4(input, intBatch, intDepthIn, intY + intKernelY, intX + intKernelX) * IDX_2(kernel_bank, KernelIdx, intKernelDepth); //} } } if (intDepthIn == 0){ int intBiasDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + nFeatures *kernel_size*kernel_size; output_i+= IDX_2(kernel_bank, KernelIdx, intBiasDepth); } atomicAdd(&output[outIndex], output_i); //output[intIndex] = output_i; } int CSKernelConv2D_forward_cuda_kernel( at::Tensor& input, at::Tensor& kernel_bank, int kernel_size, at::Tensor& output, at::Tensor& buckets, hipStream_t stream ) { int n_output = 0; n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3) * input.size(1); hipLaunchKernelGGL(( CSKernelConv2D_forward_function), dim3((n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0, stream , n_output, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), kernel_bank.data<float>(), make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1), make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1), output.data<float>(), make_long4(output.size(0), output.size(1), output.size(2), output.size(3)), make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); hipError_t err = hipGetLastError(); // check for errors if (err != hipSuccess) { printf("error in 
forward_cuda_kernel: %s\n", hipGetErrorString(err)); return 0; } return 1; } /* int KernelConv2D_forward_cuda_kernel( at::Tensor& input, at::Tensor& kernel, int kernel_size, at::Tensor& output, hipStream_t stream ) { int n_output = 0; n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3); KernelConv2D_forward_function<<< (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_output, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), kernel.data<float>(), make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)), make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)), output.data<float>(), make_long4(output.size(0), output.size(1), output.size(2), output.size(3)), make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)) ); hipError_t err = hipGetLastError(); // check for errors if (err != hipSuccess) { printf("error in forward_cuda_kernel: %s\n", hipGetErrorString(err)); return 0; } return 1; } //Define input backward operations __global__ void KernelConv2D_backward_function_input( const int n_grad_input, const int kernel_size, const float* kernel, const long4 kernel_shape, const long4 kernel_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_input) { return; } float grad_input_i = 0.0; int nFeatures = VEC_1(grad_input_shape); int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape)) % VEC_0(grad_input_shape); int intDepth = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape); int intY = ( intIndex / 
VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape); int intX = ( intIndex ) % VEC_3(grad_input_shape); int kernel_H = VEC_2(kernel_shape); int kernel_W = VEC_3(kernel_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){ for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) { // grad_input: B,C,H+k-1,W+k-1 if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){ //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = nFeatures * kernel_size * kernel_size * intDepth + kernel_size*kernel_size*intOutChannel + kernel_size * intKernelY + intKernelX; grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX); } } } } grad_input[intIndex] = grad_input_i; } //Define kernel backward operations __global__ void KernelConv2D_backward_function_kernel( const int n_grad_kernel, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_kernel) { return; } int nFeatures = VEC_1(input_shape); int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape); int intDepth = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape); int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size; int intKernelX = ( intIndex / 
VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size; int intY = ( intIndex / VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape); int intX = ( intIndex ) % VEC_3(grad_kernel_shape); int intInChannel = intDepth / nFeatures; int intOutChannel = intDepth % nFeatures; // grad_input: B,C,K,K,H,W grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX); } int KernelConv2D_backward_cuda_kernel( at::Tensor& input, at::Tensor& kernel, int kernel_size, at::Tensor& grad_output, at::Tensor& grad_input, at::Tensor& grad_kernel, hipStream_t stream ) { int n_grad_input = 0; n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3); int n_grad_kernel = 0; n_grad_kernel = grad_kernel.size(0) * grad_kernel.size(1) * grad_kernel.size(2) * grad_kernel.size(3); KernelConv2D_backward_function_input<<< (n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_grad_input, kernel_size, kernel.data<float>(), make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)), make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_input.data<float>(), make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)), make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)) ); KernelConv2D_backward_function_kernel<<< (n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0,stream >>>( n_grad_kernel, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), 
input.stride(2), input.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_kernel.data<float>(), make_long4(grad_kernel.size(0), grad_kernel.size(1), grad_kernel.size(2), grad_kernel.size(3)), make_long4(grad_kernel.stride(0), grad_kernel.stride(1), grad_kernel.stride(2), grad_kernel.stride(3)) ); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in backward_cuda_kernel: %s\n", hipGetErrorString(err)); return 0; } return 1; } */ //Define input backward operations __global__ void CSKernelConv2D_backward_function_input( const int n_grad_input, const int kernel_size, const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_input) { return; } float grad_input_i = 0.0; int nFeatures = VEC_1(grad_input_shape); int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) / VEC_1(grad_output_shape)) % VEC_0(grad_input_shape); int intDepthOut = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) ) % VEC_1(grad_output_shape); int intDepthIn = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape); int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape); int intX = ( intIndex ) % VEC_3(grad_input_shape); int OutIdx = intBatch * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) * VEC_1(grad_input_shape) + intDepthIn * VEC_3(grad_input_shape) * 
VEC_2(grad_input_shape) + intY * VEC_3(grad_input_shape) + intX; int kernel_H = VEC_2(grad_output_shape); int kernel_W = VEC_3(grad_output_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){ //for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) { // grad_input: B,C,H+k-1,W+k-1 if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){ //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int KernelIdx = IDX_3(buckets,intBatch, intY - intKernelY, intX - intKernelX); int intKernelDepth = (nFeatures * kernel_size * kernel_size + 1)* intDepthOut + kernel_size*kernel_size*intDepthIn + kernel_size * intKernelY + intKernelX; grad_input_i += IDX_2(kernel_bank, KernelIdx, intKernelDepth) * IDX_4(grad_output, intBatch, intDepthOut, intY - intKernelY, intX - intKernelX); //grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX); } //} } } atomicAdd(&(grad_input[OutIdx]), grad_input_i); //grad_input[intIndex] = grad_input_i; } //Define kernel backward operations __global__ void CSKernelConv2D_backward_function_kernel( const int n_grad_kernel, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_kernel) { return; } int isBias = 0; int nFeatures = VEC_1(input_shape); //int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / 
kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape); //int intDepth = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape); //int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size; //int intKernelX = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size; //int intY = ( intIndex / VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape); //int intX = ( intIndex ) % VEC_3(grad_kernel_shape); int intBatch = ( intIndex / VEC_1(grad_kernel_shape) / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_0(grad_output_shape); int intKernelDepth = ( intIndex / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_1(grad_kernel_shape); int intY = ( intIndex / VEC_3(grad_output_shape) ) % VEC_2(grad_output_shape); int intX = ( intIndex ) % VEC_3(grad_output_shape); //int intInChannel = intDepth / nFeatures; //int intOutChannel = intDepth % nFeatures; int KernelIdx = IDX_3(buckets,intBatch, intY, intX); int intOutChannel = intKernelDepth / (nFeatures*kernel_size*kernel_size + 1); int KernelTemp = (intKernelDepth % (nFeatures*kernel_size*kernel_size + 1)); if(KernelTemp == (nFeatures*kernel_size*kernel_size)){ isBias = 1; } // grad_input: B,C,K,K,H,W //grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX); if(isBias == 0){ int intInChannel = KernelTemp / (kernel_size*kernel_size); int intKernelY = (KernelTemp / kernel_size) % kernel_size; int intKernelX = KernelTemp % kernel_size; atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), (IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX))); } else{ atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), IDX_4(grad_output, intBatch, intOutChannel, intY, intX)); } } 
int CSKernelConv2D_backward_cuda_kernel( at::Tensor& input, at::Tensor& kernel_bank, int kernel_size, at::Tensor& grad_output, at::Tensor& grad_input, at::Tensor& grad_kernel, at::Tensor& buckets, hipStream_t stream ) { int n_grad_input = 0; n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3) * grad_output.size(1); int n_grad_kernel = 0; n_grad_kernel = grad_output.size(0) * grad_kernel.size(1) * grad_output.size(2) * grad_output.size(3); hipLaunchKernelGGL(( CSKernelConv2D_backward_function_input), dim3((n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0, stream , n_grad_input, kernel_size, kernel_bank.data<float>(), make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1), make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_input.data<float>(), make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)), make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); hipLaunchKernelGGL(( CSKernelConv2D_backward_function_kernel), dim3((n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK), 0,stream , n_grad_kernel, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), 
grad_output.stride(2), grad_output.stride(3)), grad_kernel.data<float>(), make_long4(grad_kernel.size(0), grad_kernel.size(1), 1, 1), make_long4(grad_kernel.stride(0), grad_kernel.stride(1), 1, 1), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in backward_cuda_kernel: %s\n", hipGetErrorString(err)); return 0; } return 1; } #ifdef __cplusplus } #endif
4a1b3bcd97e68e7f55ba312fc725638d36239318.cu
//Update the implemention to support 2D kernel by Shangchen Zhou #include <cuda.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include "stdio.h" #define THREAD_PER_BLOCK 512 #define VEC_0(ARRAY) ((ARRAY).x) #define VEC_1(ARRAY) ((ARRAY).y) #define VEC_2(ARRAY) ((ARRAY).z) #define VEC_3(ARRAY) ((ARRAY).w) #define IDX_1(ARRAY, X) ((ARRAY)[((X) * (ARRAY##_stride.x))]) #define IDX_2(ARRAY, X, Y) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y))]) #define IDX_3(ARRAY, X, Y, Z) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z))]) #define IDX_4(ARRAY, X, Y, Z, W) ((ARRAY)[((X) * (ARRAY##_stride.x)) + ((Y) * (ARRAY##_stride.y)) + ((Z) * (ARRAY##_stride.z)) + ((W) * (ARRAY##_stride.w))]) #ifdef __cplusplus extern "C" { #endif //Define forward operations // input and output should be of shape [batch_size, n_features, H,W] // kernel should be of shape [batch_size, n_features*n_features*kernel_size*kernel_size, H, W] /* __global__ void KernelConv2D_forward_function( const int n_output, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* kernel, const long4 kernel_shape, const long4 kernel_stride, float* output, const long4 output_shape, const long4 output_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_output) { return; } float output_i = 0.0; int nFeatures = VEC_1(output_shape); int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape)) % VEC_0(output_shape); int intDepth = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape); int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape); int intX = ( intIndex ) % VEC_3(output_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) { for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) { //int intKernelDepth 
= kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; output_i += IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(kernel, intBatch, intKernelDepth, intY, intX); } } } output[intIndex] = output_i; } */ __global__ void CSKernelConv2D_forward_function( const int n_output, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride, float* output, const long4 output_shape, const long4 output_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_output) { return; } float output_i = 0.0; int nFeatures = VEC_1(input_shape); int intBatch = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) / VEC_1(input_shape)) % VEC_0(output_shape); int intDepthIn = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) / VEC_1(output_shape) ) % VEC_1(input_shape); int intDepthOut = ( intIndex / VEC_3(output_shape) / VEC_2(output_shape) ) % VEC_1(output_shape); int intY = ( intIndex / VEC_3(output_shape) ) % VEC_2(output_shape); int intX = ( intIndex ) % VEC_3(output_shape); int outIndex = intBatch * VEC_3(output_shape) * VEC_2(output_shape) * VEC_1(output_shape) + intDepthOut * VEC_3(output_shape) * VEC_2(output_shape) + intY * VEC_3(output_shape) + intX; int KernelIdx = IDX_3(buckets,intBatch, intY, intX); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1) { //for (int intInChannel=0; intInChannel<nFeatures; intInChannel +=1) { //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; //int intKernelDepth 
= nFeatures *kernel_size*kernel_size* intInChannel + kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + kernel_size * kernel_size * intDepthIn + kernel_size * intKernelY + intKernelX; output_i += IDX_4(input, intBatch, intDepthIn, intY + intKernelY, intX + intKernelX) * IDX_2(kernel_bank, KernelIdx, intKernelDepth); //} } } if (intDepthIn == 0){ int intBiasDepth = intDepthOut* (nFeatures *kernel_size*kernel_size + 1) + nFeatures *kernel_size*kernel_size; output_i+= IDX_2(kernel_bank, KernelIdx, intBiasDepth); } atomicAdd(&output[outIndex], output_i); //output[intIndex] = output_i; } int CSKernelConv2D_forward_cuda_kernel( at::Tensor& input, at::Tensor& kernel_bank, int kernel_size, at::Tensor& output, at::Tensor& buckets, cudaStream_t stream ) { int n_output = 0; n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3) * input.size(1); CSKernelConv2D_forward_function<<< (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_output, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), kernel_bank.data<float>(), make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1), make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1), output.data<float>(), make_long4(output.size(0), output.size(1), output.size(2), output.size(3)), make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); cudaError_t err = cudaGetLastError(); // check for errors if (err != cudaSuccess) { printf("error in forward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; } /* int 
KernelConv2D_forward_cuda_kernel( at::Tensor& input, at::Tensor& kernel, int kernel_size, at::Tensor& output, cudaStream_t stream ) { int n_output = 0; n_output = output.size(0) * output.size(1) * output.size(2) * output.size(3); KernelConv2D_forward_function<<< (n_output + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_output, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), kernel.data<float>(), make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)), make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)), output.data<float>(), make_long4(output.size(0), output.size(1), output.size(2), output.size(3)), make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)) ); cudaError_t err = cudaGetLastError(); // check for errors if (err != cudaSuccess) { printf("error in forward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; } //Define input backward operations __global__ void KernelConv2D_backward_function_input( const int n_grad_input, const int kernel_size, const float* kernel, const long4 kernel_shape, const long4 kernel_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_input) { return; } float grad_input_i = 0.0; int nFeatures = VEC_1(grad_input_shape); int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape)) % VEC_0(grad_input_shape); int intDepth = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape); int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape); int intX = ( intIndex ) % 
VEC_3(grad_input_shape); int kernel_H = VEC_2(kernel_shape); int kernel_W = VEC_3(kernel_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){ for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) { // grad_input: B,C,H+k-1,W+k-1 if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){ //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int intKernelDepth = nFeatures * kernel_size * kernel_size * intDepth + kernel_size*kernel_size*intOutChannel + kernel_size * intKernelY + intKernelX; grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX); } } } } grad_input[intIndex] = grad_input_i; } //Define kernel backward operations __global__ void KernelConv2D_backward_function_kernel( const int n_grad_kernel, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_kernel) { return; } int nFeatures = VEC_1(input_shape); int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape); int intDepth = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape); int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size; int intKernelX = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size; int intY = ( intIndex 
/ VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape); int intX = ( intIndex ) % VEC_3(grad_kernel_shape); int intInChannel = intDepth / nFeatures; int intOutChannel = intDepth % nFeatures; // grad_input: B,C,K,K,H,W grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX); } int KernelConv2D_backward_cuda_kernel( at::Tensor& input, at::Tensor& kernel, int kernel_size, at::Tensor& grad_output, at::Tensor& grad_input, at::Tensor& grad_kernel, cudaStream_t stream ) { int n_grad_input = 0; n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3); int n_grad_kernel = 0; n_grad_kernel = grad_kernel.size(0) * grad_kernel.size(1) * grad_kernel.size(2) * grad_kernel.size(3); KernelConv2D_backward_function_input<<< (n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_grad_input, kernel_size, kernel.data<float>(), make_long4(kernel.size(0), kernel.size(1), kernel.size(2), kernel.size(3)), make_long4(kernel.stride(0), kernel.stride(1), kernel.stride(2), kernel.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_input.data<float>(), make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)), make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)) ); KernelConv2D_backward_function_kernel<<< (n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0,stream >>>( n_grad_kernel, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), 
grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_kernel.data<float>(), make_long4(grad_kernel.size(0), grad_kernel.size(1), grad_kernel.size(2), grad_kernel.size(3)), make_long4(grad_kernel.stride(0), grad_kernel.stride(1), grad_kernel.stride(2), grad_kernel.stride(3)) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in backward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; } */ //Define input backward operations __global__ void CSKernelConv2D_backward_function_input( const int n_grad_input, const int kernel_size, const float* kernel_bank, const long4 kernel_bank_shape, const long4 kernel_bank_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_input, const long4 grad_input_shape, const long4 grad_input_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_input) { return; } float grad_input_i = 0.0; int nFeatures = VEC_1(grad_input_shape); int intBatch = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) / VEC_1(grad_output_shape)) % VEC_0(grad_input_shape); int intDepthOut = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) / VEC_1(grad_input_shape) ) % VEC_1(grad_output_shape); int intDepthIn = ( intIndex / VEC_3(grad_input_shape) / VEC_2(grad_input_shape) ) % VEC_1(grad_input_shape); int intY = ( intIndex / VEC_3(grad_input_shape) ) % VEC_2(grad_input_shape); int intX = ( intIndex ) % VEC_3(grad_input_shape); int OutIdx = intBatch * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) * VEC_1(grad_input_shape) + intDepthIn * VEC_3(grad_input_shape) * VEC_2(grad_input_shape) + intY * VEC_3(grad_input_shape) + intX; int kernel_H = VEC_2(grad_output_shape); 
int kernel_W = VEC_3(grad_output_shape); for (int intKernelY = 0; intKernelY < kernel_size; intKernelY += 1) { for (int intKernelX = 0; intKernelX < kernel_size; intKernelX += 1){ //for (int intOutChannel=0; intOutChannel<nFeatures; intOutChannel +=1) { // grad_input: B,C,H+k-1,W+k-1 if (intY - intKernelY >= 0 && intY - intKernelY <= kernel_H - 1 && intX - intKernelX >= 0 && intX - intKernelX <= kernel_W - 1){ //int intKernelDepth = kernel_size * kernel_size * intDepth + kernel_size * intKernelY + intKernelX; int KernelIdx = IDX_3(buckets,intBatch, intY - intKernelY, intX - intKernelX); int intKernelDepth = (nFeatures * kernel_size * kernel_size + 1)* intDepthOut + kernel_size*kernel_size*intDepthIn + kernel_size * intKernelY + intKernelX; grad_input_i += IDX_2(kernel_bank, KernelIdx, intKernelDepth) * IDX_4(grad_output, intBatch, intDepthOut, intY - intKernelY, intX - intKernelX); //grad_input_i += IDX_4(kernel, intBatch, intKernelDepth, intY - intKernelY, intX - intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY - intKernelY, intX - intKernelX); } //} } } atomicAdd(&(grad_input[OutIdx]), grad_input_i); //grad_input[intIndex] = grad_input_i; } //Define kernel backward operations __global__ void CSKernelConv2D_backward_function_kernel( const int n_grad_kernel, const int kernel_size, const float* input, const long4 input_shape, const long4 input_stride, const float* grad_output, const long4 grad_output_shape, const long4 grad_output_stride, float* grad_kernel, const long4 grad_kernel_shape, const long4 grad_kernel_stride, const int* buckets, const long4 buckets_shape, const long4 buckets_stride ) { int intIndex = blockIdx.x * blockDim.x + threadIdx.x; if (intIndex >= n_grad_kernel) { return; } int isBias = 0; int nFeatures = VEC_1(input_shape); //int intBatch = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size / VEC_1(grad_kernel_shape)) % VEC_0(grad_kernel_shape); //int intDepth = ( intIndex / 
VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size / kernel_size ) % VEC_1(grad_kernel_shape); //int intKernelY = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) / kernel_size ) % kernel_size; //int intKernelX = ( intIndex / VEC_3(grad_kernel_shape) / VEC_2(grad_kernel_shape) ) % kernel_size; //int intY = ( intIndex / VEC_3(grad_kernel_shape) ) % VEC_2(grad_kernel_shape); //int intX = ( intIndex ) % VEC_3(grad_kernel_shape); int intBatch = ( intIndex / VEC_1(grad_kernel_shape) / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_0(grad_output_shape); int intKernelDepth = ( intIndex / VEC_2(grad_output_shape) / VEC_3(grad_output_shape)) % VEC_1(grad_kernel_shape); int intY = ( intIndex / VEC_3(grad_output_shape) ) % VEC_2(grad_output_shape); int intX = ( intIndex ) % VEC_3(grad_output_shape); //int intInChannel = intDepth / nFeatures; //int intOutChannel = intDepth % nFeatures; int KernelIdx = IDX_3(buckets,intBatch, intY, intX); int intOutChannel = intKernelDepth / (nFeatures*kernel_size*kernel_size + 1); int KernelTemp = (intKernelDepth % (nFeatures*kernel_size*kernel_size + 1)); if(KernelTemp == (nFeatures*kernel_size*kernel_size)){ isBias = 1; } // grad_input: B,C,K,K,H,W //grad_kernel[intIndex] = IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX); if(isBias == 0){ int intInChannel = KernelTemp / (kernel_size*kernel_size); int intKernelY = (KernelTemp / kernel_size) % kernel_size; int intKernelX = KernelTemp % kernel_size; atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), (IDX_4(input, intBatch, intInChannel, intY + intKernelY, intX + intKernelX) * IDX_4(grad_output, intBatch, intOutChannel, intY, intX))); } else{ atomicAdd(&(IDX_2(grad_kernel, KernelIdx, intKernelDepth)), IDX_4(grad_output, intBatch, intOutChannel, intY, intX)); } } int CSKernelConv2D_backward_cuda_kernel( at::Tensor& input, at::Tensor& kernel_bank, int 
kernel_size, at::Tensor& grad_output, at::Tensor& grad_input, at::Tensor& grad_kernel, at::Tensor& buckets, cudaStream_t stream ) { int n_grad_input = 0; n_grad_input = grad_input.size(0) * grad_input.size(1) * grad_input.size(2) * grad_input.size(3) * grad_output.size(1); int n_grad_kernel = 0; n_grad_kernel = grad_output.size(0) * grad_kernel.size(1) * grad_output.size(2) * grad_output.size(3); CSKernelConv2D_backward_function_input<<< (n_grad_input + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0, stream >>>( n_grad_input, kernel_size, kernel_bank.data<float>(), make_long4(kernel_bank.size(0), kernel_bank.size(1), 1, 1), make_long4(kernel_bank.stride(0), kernel_bank.stride(1), 1, 1), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_input.data<float>(), make_long4(grad_input.size(0), grad_input.size(1), grad_input.size(2), grad_input.size(3)), make_long4(grad_input.stride(0), grad_input.stride(1), grad_input.stride(2), grad_input.stride(3)), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); CSKernelConv2D_backward_function_kernel<<< (n_grad_kernel + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK, THREAD_PER_BLOCK, 0,stream >>>( n_grad_kernel, kernel_size, input.data<float>(), make_long4(input.size(0), input.size(1), input.size(2), input.size(3)), make_long4(input.stride(0), input.stride(1), input.stride(2), input.stride(3)), grad_output.data<float>(), make_long4(grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)), make_long4(grad_output.stride(0), grad_output.stride(1), grad_output.stride(2), grad_output.stride(3)), grad_kernel.data<float>(), make_long4(grad_kernel.size(0), grad_kernel.size(1), 1, 1), make_long4(grad_kernel.stride(0), 
grad_kernel.stride(1), 1, 1), buckets.data<int>(), make_long4(buckets.size(0), buckets.size(1), buckets.size(2), 1), make_long4(buckets.stride(0), buckets.stride(1), buckets.stride(2), 1) ); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in backward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; } #ifdef __cplusplus } #endif
af3b745af98a4c63060363fdea14fd7caec03a86.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "simple_cuda_iterate.h" __global__ void simple_quadrants_iterate( sc_array_t* quadrants, p4est_ghost_t* ghost_layer, p4est_t* p4est, p4est_topidx_t treeId, void* user_data, cuda_iter_volume_t iter_volume, size_t quads_count, size_t quads_per_thread) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < quads_count) { size_t elems_remaining; if(i >= quads_count - quads_per_thread) { elems_remaining = quads_count - i; } else { elems_remaining = quads_per_thread; } for(int j = 0; j < elems_remaining; j++) { iter_volume( p4est, ghost_layer, p4est_device_quadrant_array_index(quadrants, i + j), i+j, treeId, user_data ); } } } void run_setup_kernel_volume_callback(cuda_iter_volume_api_t* iter_volume_api, cuda_iter_volume_t* d_callback) { iter_volume_api-hipLaunchKernelGGL((>setup_kernel), dim3(1),dim3(1), 0, 0, d_callback); } void run_setup_kernel_face_callback(cuda_iter_face_api_t* iter_face_api, cuda_iter_face_t* d_callback) { iter_face_api-hipLaunchKernelGGL((>setup_kernel), dim3(1),dim3(1), 0, 0, d_callback); } void run_simple_quadrants_iterate(sc_array_t* quadrants, p4est_ghost_t* ghost_layer, p4est_t* p4est, p4est_topidx_t treeId, void* user_data, cuda_iter_volume_t iter_volume, size_t quads_count, size_t quads_per_thread, size_t needed_block_count, size_t threads_per_block ) { hipLaunchKernelGGL(( simple_quadrants_iterate), dim3(needed_block_count), dim3(threads_per_block), 0, 0, quadrants, ghost_layer, p4est, treeId, user_data, iter_volume, quads_count, quads_per_thread ); gpuErrchk(hipDeviceSynchronize()); } __global__ void simple_faces_iterate( p4est_t* p4est, p4est_ghost_t* ghost_layer, sc_array_t* quadrants, p4est_iter_face_side_t* faces, size_t faces_count, void* user_data, cuda_iter_face_t iter_face, size_t faces_per_iter, size_t faces_per_thread) { sc_array_t *ghost_quadrants = &(ghost_layer->ghosts); int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < 
faces_count) { size_t elems_remaining; if(i >= faces_count - faces_per_thread) { elems_remaining = faces_count - i; } else { elems_remaining = faces_per_thread; } p4est_iter_face_side_t* cursor = faces + i * faces_per_iter; for(int j = 0; j < elems_remaining; j++, cursor+=faces_per_iter) { p4est_iter_face_side_t* current_face = cursor; if(current_face->is_hanging) { p4est_quadrant_t** quads = current_face->is.hanging.quad; p4est_locidx_t *quadid = current_face->is.hanging.quadid; if(current_face->is.hanging.is_ghost[0]){ quads[0] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[0]); } else { quads[0] = p4est_device_quadrant_array_index(quadrants, quadid[0]); } if(current_face->is.hanging.is_ghost[1]) { quads[1] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[1]); } else { quads[1] = p4est_device_quadrant_array_index(quadrants, quadid[1]); } } else { if(current_face->is.full.is_ghost){ current_face->is.full.quad = p4est_device_quadrant_array_index(ghost_quadrants, current_face->is.full.quadid); } else { current_face->is.full.quad = p4est_device_quadrant_array_index(quadrants, current_face->is.full.quadid); } } current_face++; if(current_face->is_hanging) { p4est_quadrant_t** quads = current_face->is.hanging.quad; p4est_locidx_t *quadid = current_face->is.hanging.quadid; if(current_face->is.hanging.is_ghost[0]){ quads[0] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[0]); } else { quads[0] = p4est_device_quadrant_array_index(quadrants, quadid[0]); } if(current_face->is.hanging.is_ghost[1]) { quads[1] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[1]); } else { quads[1] = p4est_device_quadrant_array_index(quadrants, quadid[1]); } } else { if(current_face->is.full.is_ghost){ current_face->is.full.quad = p4est_device_quadrant_array_index(ghost_quadrants, current_face->is.full.quadid); } else { current_face->is.full.quad = p4est_device_quadrant_array_index(quadrants, current_face->is.full.quadid); } } iter_face( p4est, 
ghost_layer, cursor, user_data ); } } } void run_simple_faces_iterate(p4est_t* p4est, p4est_ghost_t* ghost_layer, sc_array_t* quadrants, p4est_iter_face_side_t* faces, size_t faces_count, void* user_data, cuda_iter_face_t iter_face, size_t faces_per_iter, size_t faces_per_thread, size_t needed_block_count, size_t threads_per_block) { hipLaunchKernelGGL(( simple_faces_iterate), dim3(needed_block_count), dim3(threads_per_block), 0, 0, p4est, ghost_layer, quadrants, faces, faces_count, user_data, iter_face, faces_per_iter, faces_per_thread ); gpuErrchk(hipDeviceSynchronize()); }
af3b745af98a4c63060363fdea14fd7caec03a86.cu
#include "simple_cuda_iterate.h" __global__ void simple_quadrants_iterate( sc_array_t* quadrants, p4est_ghost_t* ghost_layer, p4est_t* p4est, p4est_topidx_t treeId, void* user_data, cuda_iter_volume_t iter_volume, size_t quads_count, size_t quads_per_thread) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < quads_count) { size_t elems_remaining; if(i >= quads_count - quads_per_thread) { elems_remaining = quads_count - i; } else { elems_remaining = quads_per_thread; } for(int j = 0; j < elems_remaining; j++) { iter_volume( p4est, ghost_layer, p4est_device_quadrant_array_index(quadrants, i + j), i+j, treeId, user_data ); } } } void run_setup_kernel_volume_callback(cuda_iter_volume_api_t* iter_volume_api, cuda_iter_volume_t* d_callback) { iter_volume_api->setup_kernel<<<1,1>>>(d_callback); } void run_setup_kernel_face_callback(cuda_iter_face_api_t* iter_face_api, cuda_iter_face_t* d_callback) { iter_face_api->setup_kernel<<<1,1>>>(d_callback); } void run_simple_quadrants_iterate(sc_array_t* quadrants, p4est_ghost_t* ghost_layer, p4est_t* p4est, p4est_topidx_t treeId, void* user_data, cuda_iter_volume_t iter_volume, size_t quads_count, size_t quads_per_thread, size_t needed_block_count, size_t threads_per_block ) { simple_quadrants_iterate<<<needed_block_count, threads_per_block>>>( quadrants, ghost_layer, p4est, treeId, user_data, iter_volume, quads_count, quads_per_thread ); gpuErrchk(cudaDeviceSynchronize()); } __global__ void simple_faces_iterate( p4est_t* p4est, p4est_ghost_t* ghost_layer, sc_array_t* quadrants, p4est_iter_face_side_t* faces, size_t faces_count, void* user_data, cuda_iter_face_t iter_face, size_t faces_per_iter, size_t faces_per_thread) { sc_array_t *ghost_quadrants = &(ghost_layer->ghosts); int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < faces_count) { size_t elems_remaining; if(i >= faces_count - faces_per_thread) { elems_remaining = faces_count - i; } else { elems_remaining = faces_per_thread; } p4est_iter_face_side_t* cursor = 
faces + i * faces_per_iter; for(int j = 0; j < elems_remaining; j++, cursor+=faces_per_iter) { p4est_iter_face_side_t* current_face = cursor; if(current_face->is_hanging) { p4est_quadrant_t** quads = current_face->is.hanging.quad; p4est_locidx_t *quadid = current_face->is.hanging.quadid; if(current_face->is.hanging.is_ghost[0]){ quads[0] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[0]); } else { quads[0] = p4est_device_quadrant_array_index(quadrants, quadid[0]); } if(current_face->is.hanging.is_ghost[1]) { quads[1] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[1]); } else { quads[1] = p4est_device_quadrant_array_index(quadrants, quadid[1]); } } else { if(current_face->is.full.is_ghost){ current_face->is.full.quad = p4est_device_quadrant_array_index(ghost_quadrants, current_face->is.full.quadid); } else { current_face->is.full.quad = p4est_device_quadrant_array_index(quadrants, current_face->is.full.quadid); } } current_face++; if(current_face->is_hanging) { p4est_quadrant_t** quads = current_face->is.hanging.quad; p4est_locidx_t *quadid = current_face->is.hanging.quadid; if(current_face->is.hanging.is_ghost[0]){ quads[0] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[0]); } else { quads[0] = p4est_device_quadrant_array_index(quadrants, quadid[0]); } if(current_face->is.hanging.is_ghost[1]) { quads[1] = p4est_device_quadrant_array_index(ghost_quadrants, quadid[1]); } else { quads[1] = p4est_device_quadrant_array_index(quadrants, quadid[1]); } } else { if(current_face->is.full.is_ghost){ current_face->is.full.quad = p4est_device_quadrant_array_index(ghost_quadrants, current_face->is.full.quadid); } else { current_face->is.full.quad = p4est_device_quadrant_array_index(quadrants, current_face->is.full.quadid); } } iter_face( p4est, ghost_layer, cursor, user_data ); } } } void run_simple_faces_iterate(p4est_t* p4est, p4est_ghost_t* ghost_layer, sc_array_t* quadrants, p4est_iter_face_side_t* faces, size_t faces_count, void* 
user_data, cuda_iter_face_t iter_face, size_t faces_per_iter, size_t faces_per_thread, size_t needed_block_count, size_t threads_per_block) { simple_faces_iterate<<<needed_block_count, threads_per_block>>>( p4est, ghost_layer, quadrants, faces, faces_count, user_data, iter_face, faces_per_iter, faces_per_thread ); gpuErrchk(cudaDeviceSynchronize()); }
b4475bc3ca9020571bf32c0dac9e126775961dc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
b4475bc3ca9020571bf32c0dac9e126775961dc2.cu
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) { int i = 0; // Oops! Something is not right here, please fix it! if (i < n) { c[i] = a[i] + b[i]; } }
65d237984a958c83908dae800b637a4e0efe0dd2.hip
// !!! This is a file automatically generated by hipify!!!
#include "../../../cuda/CUDADevicesService.hpp"
#include "../../../datatransfer/DataTransfer.cuh"
#include "../LevenbergMarquardtFletcher/CudaStepVariables.cuh"
#include "../LevenbergMarquardtFletcher/CudaCalculateAdjustedRsquareValue.cuh"
#include "../../../models/cuda/Statics.cuh"
#include "../../../models/cuda/Model.cuh"
#include "../ErrorHandling.cuh"
#include "../MemoryHandling.cuh"
#include <hip/hip_runtime.h>

#include "CudaLevenbergMarquardtCore.cu"

// One thread per data row (= one voxel's echo train). Each thread runs a full
// Levenberg-Marquardt fit and writes parametersCount + 1 values (fitted
// parameters followed by an adjusted-R^2 goodness-of-fit) to `output`.
// Per-thread scratch comes either from dynamic shared memory
// (USE_SHARED_MEMORY) or from the `artifacts` global buffer; its layout is
// [parameters | y_hat | dydp | alpha | beta | temp], sharedMemoryPerThread
// NTCALC elements in total (computed by the host wrapper below).
template<typename NTOUTPUT, typename NTCALC>
__global__ void CudaLevenbergMarquardt(
    short startModelFunctionID,
    short modelFunctionID,
    short residualWeightingFunctionID,
    short alphaWeightingFunctionID,
    int parametersCount,
    int columnCount,
    int rowCount,
    int echoesCount,
    int sharedMemoryPerThread,
    NTCALC* weights,
    NTCALC* x_values,
    NTCALC* artifacts,
    NTOUTPUT* output,
    NTCALC EPSILON_1,
    NTCALC EPSILON_2)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index >= rowCount)
        return;

#ifdef USE_SHARED_MEMORY
    extern __shared__ NTCALC sharedMemory[];
    NTCALC* threadSharedMemory = sharedMemory + threadIdx.x * sharedMemoryPerThread;
    NTCALC* parameters = threadSharedMemory;
#else
    NTCALC* parameters = artifacts + index * sharedMemoryPerThread;
#endif

    // Carve the per-thread scratch buffer into the step-variable views.
    CudaStepVariables<NTCALC> stepVariables;
    stepVariables.y_hat = parameters + parametersCount;
    stepVariables.dydp  = stepVariables.y_hat + echoesCount;
    stepVariables.alpha = stepVariables.dydp + echoesCount * parametersCount;
    stepVariables.beta  = stepVariables.alpha + parametersCount * parametersCount;

    // Parameters start value (model-specific initial guess).
    cudaModelFunction(
        startModelFunctionID,
        x_values, (NTCALC*)NULL, echoesCount,
        parameters, parametersCount,
        (NTCALC*)NULL, 0);
    __syncthreads();

    // Parameter boundaries live in constant memory at offsets 7 + 2*p
    // (min, max pairs); see constBuilder in the host wrapper.
    // Renamed the loop variables below from `index` — they shadowed the
    // thread index computed above.
    NTCALC parameters_min[3];
    NTCALC parameters_max[3];
    for(int p = 0; p < parametersCount; ++p)
    {
        parameters_min[p] = processConstants[7 + p * 2];
        parameters_max[p] = processConstants[7 + p * 2 + 1];
    }
    NTCALC parameters_try[3];
    NTCALC delta_p[3];

    // Column `echoesCount` of the restructured texture row holds the slice
    // index, used to select this voxel's weight vector.
    int sliceIndex = (int)tex2D(floatTexture, echoesCount, index);

    CudaLevenbergMarquardtCore(
        modelFunctionID,
        residualWeightingFunctionID,
        alphaWeightingFunctionID,
        columnCount, rowCount, echoesCount,
        weights + sliceIndex * echoesCount,
        x_values,
        stepVariables.beta + parametersCount,   // temp area follows beta
        parameters, parametersCount,
        parameters_min, parameters_max,
        delta_p, parameters_try,
        &stepVariables,
        EPSILON_1, EPSILON_2);
    __syncthreads();

    // p1 (T1/T2)
    output[index * (parametersCount + 1)] = (NTOUTPUT)parameters[0];
    // p2 (M0)
    output[index * (parametersCount + 1) + 1] = (NTOUTPUT)parameters[1];
    // p3 (FA)
    if(parametersCount == 3)
        output[index * (parametersCount + 1) + 2] = (NTOUTPUT)parameters[2];

    // GOF: skip when the fit left every parameter at exactly 1
    // (the "no fit happened" sentinel).
    bool calc = false;
    for(int p = 0; p < parametersCount; ++p)
        if(parameters[p] != 1)
        {
            calc = true;
            break;
        }
    output[index * (parametersCount + 1) + parametersCount] = calc
        ? (NTOUTPUT)CudaCalculateAdjustedRsquareValue(
              stepVariables.y_hat, echoesCount, parametersCount)
        : 0;
}

// Host driver: filters out below-threshold voxels, restructures the surviving
// data into texture rows of [echoes... | sliceIndex | (FA) | (T1)], uploads
// constants, and launches CudaLevenbergMarquardt in maxTexture2DHeight-sized
// chunks. Returns a new[]-allocated array of dataLength * (parametersCount+1)
// outputs that the caller owns.
template<typename NTINPUT, typename NTOUTPUT, typename NTCALC>
NTOUTPUT* CudaProcessLevenbergMarquardt(
    short startModelFunctionID,
    short modelFunctionID,
    short residualWeightingFunctionID,
    short alphaWeightingFunctionID,
    int parametersCount,
    int startIndex,
    int endIndex,
    int columnCount,
    int rowCount,
    int sliceCount,
    NTCALC* echotimes,
    NTCALC* weights,
    NTINPUT* data,
    NTCALC threshold,
    NTCALC* constants,
    int constantsLength,
    NTCALC* parameterBoundaries,
    bool needsFlipAnglesData,
    bool needsT1Data,
    int threadCount,
    NTCALC EPSILON_1,
    NTCALC EPSILON_2)
{
    int sliceSize = columnCount * rowCount;
    int dataLength = sliceSize * sliceCount;
    int echoesCount = endIndex - startIndex + 1;

    // Mark valid data. Fixed: this was a runtime-sized stack array (a
    // non-standard VLA); dataLength is columns*rows*slices and easily
    // overflows the stack for real volumes, so allocate on the heap.
    bool* validData = new bool[dataLength];
    int validDataLength = MarkValidData(data, validData, dataLength, threshold);

    int restructeredColumnsCount = echoesCount + 1; // +1 for sliceIndex
    if(needsFlipAnglesData)
        ++restructeredColumnsCount;
    if(needsT1Data)
        ++restructeredColumnsCount;

    // Restructure data (valid voxels only, row-per-voxel layout).
    NTCALC* restructeredData = new NTCALC[validDataLength * restructeredColumnsCount];
    RestructureDataForward(
        data,
        needsFlipAnglesData ? constants + 7 : (float*)NULL,
        needsT1Data ? constants + 7 + sliceSize : (float*)NULL,
        validData, dataLength, sliceSize, echoesCount,
        restructeredData);

    // Split the work so each kernel call fits in one 2D texture.
    int maxTexture2DHeight = CUDADevicesService::getMaximumTexture2DHeight();
    int kernelCalls = validDataLength / maxTexture2DHeight;
    if(validDataLength % maxTexture2DHeight != 0)
        ++kernelCalls;
    int restructeredRowsCount = min(maxTexture2DHeight, validDataLength);

    // processConstants layout: [0..6] general constants,
    // [7..12] parameter (min,max) pairs, [13..] echo times.
    // NOTE(review): the fixed offset 13 assumes parametersCount <= 3 and
    // echoesCount <= 250 (263 - 13) — confirm against callers.
    float constBuilder[263] = {};
    for(int index = 0; index < 7; ++index)
        constBuilder[index] = constants[index];
    for(int index = 0; index < parametersCount * 2; ++index)
        constBuilder[index + 7] = parameterBoundaries[index];
    for(int index = 0; index < echoesCount; ++index)
        constBuilder[index + 13] = echotimes[index];
    HANDLE_CUDA_ERROR(hipMemcpyToSymbol(processConstants, constBuilder,
        263 * sizeof(float), 0, hipMemcpyHostToDevice));

    NTCALC* gpu_x_values = NULL; //processConstants + 13; //AllocAndCopyToDevice(echotimes, echoesCount);
    NTCALC* gpu_weights = AllocAndCopyToDevice(weights, echoesCount * sliceCount);
    NTOUTPUT* gpu_output = AllocOnDevice<NTOUTPUT>(validDataLength * (parametersCount + 1));

    // CudaArray + Texture for restructered data
    hipChannelFormatDesc channelFormatDesc = hipCreateChannelDesc<NTCALC>();
    hipExtent restructeredDataExtent =
        make_hipExtent(restructeredColumnsCount, restructeredRowsCount, 0);
    hipArray* dataArray = NULL;
    HANDLE_CUDA_ERROR(hipMalloc3DArray(
        &dataArray, &channelFormatDesc, restructeredDataExtent, hipArrayDefault));
    //TODO on better gpus -> for now global memory

    // Per-thread scratch size (NTCALC elements) — must match the kernel's
    // carve-up above.
    int sharedMemoryPerThread =
        parametersCount +                   //params
        echoesCount +                       //y_hat
        echoesCount * parametersCount +     //dydp
        parametersCount * parametersCount + //alpha
        parametersCount +                   //beta
        echoesCount;                        //temp

#ifdef USE_SHARED_MEMORY
    int sharedMemoryPerThreadBytes = sharedMemoryPerThread * sizeof(NTCALC);
    int availableSharedMemoryPerBlock = CUDADevicesService::getSharedMemoryPerBlock();
    // Fixed: `min(..., int threadCount)` — a stray type keyword inside the
    // call made this branch fail to compile.
    int threadsPerBlock = min(
        (int)(availableSharedMemoryPerBlock / sharedMemoryPerThreadBytes),
        threadCount);
#else
    NTCALC* gpu_artifacts = AllocOnDevice<NTCALC>(sharedMemoryPerThread * restructeredRowsCount);
    int threadsPerBlock = threadCount;
#endif

    for(int kernelIndex = 0; kernelIndex < kernelCalls; ++kernelIndex)
    {
        int remainingValidDataLength = validDataLength - kernelIndex * restructeredRowsCount;
        int callRowsCount = min(restructeredRowsCount, remainingValidDataLength);

        HANDLE_CUDA_ERROR(hipMemcpyToArray(dataArray, 0, 0,
            restructeredData + kernelIndex * restructeredColumnsCount * restructeredRowsCount,
            restructeredColumnsCount * sizeof(NTCALC) * callRowsCount,
            hipMemcpyHostToDevice));
        HANDLE_CUDA_ERROR(
            hipBindTextureToArray(floatTexture, dataArray, channelFormatDesc));

        int blockCount = (callRowsCount + threadsPerBlock - 1) / threadsPerBlock;
#ifdef USE_SHARED_MEMORY
        hipLaunchKernelGGL(( CudaLevenbergMarquardt),
            dim3(blockCount), dim3(threadsPerBlock), sharedMemoryPerThreadBytes, 0,
            startModelFunctionID, modelFunctionID,
            residualWeightingFunctionID, alphaWeightingFunctionID,
            parametersCount, restructeredColumnsCount, restructeredRowsCount,
            echoesCount, sharedMemoryPerThread,
            gpu_weights, gpu_x_values, (NTCALC*)NULL,
            gpu_output + kernelIndex * restructeredRowsCount * (parametersCount + 1),
            EPSILON_1, EPSILON_2);
#else
        hipLaunchKernelGGL(( CudaLevenbergMarquardt),
            dim3(blockCount), dim3(threadsPerBlock), 0, 0,
            startModelFunctionID, modelFunctionID,
            residualWeightingFunctionID, alphaWeightingFunctionID,
            parametersCount, restructeredColumnsCount, restructeredRowsCount,
            echoesCount, sharedMemoryPerThread,
            gpu_weights, gpu_x_values, gpu_artifacts,
            gpu_output + kernelIndex * restructeredRowsCount * (parametersCount + 1),
            EPSILON_1, EPSILON_2);
#endif
        HANDLE_CUDA_ERROR(hipGetLastError());
        HANDLE_CUDA_ERROR(hipUnbindTexture(floatTexture));
    }

    // Scatter the compact (valid-only) results back into full-volume order.
    NTOUTPUT* output = new NTOUTPUT[dataLength * (parametersCount + 1)];
    NTOUTPUT* restructeredOutput =
        CopyFromDeviceAndFree(gpu_output, validDataLength * (parametersCount + 1));
    HANDLE_CUDA_ERROR(hipGetLastError());
    RestructureDataBackward(output, validData, dataLength,
        restructeredOutput, parametersCount, true);

    // Fixed: restructeredData was allocated with new[] but released with
    // free(), which is undefined behavior — must be delete[].
    delete[] restructeredData;
    // NOTE(review): free() here assumes CopyFromDeviceAndFree allocates with
    // malloc — confirm in MemoryHandling.cuh.
    free(restructeredOutput);
    delete[] validData;
#ifndef USE_SHARED_MEMORY
    hipFree(gpu_artifacts);
#endif
    hipFree(gpu_weights);
    //hipFree(gpu_x_values);
    hipFreeArray(dataArray);
    return output;
}

// Explicit instantiations: NTINPUT in {short, float, unsigned short} crossed
// with NTOUTPUT in {short, float, unsigned short}; NTCALC is always float.
// (Parameter names omitted — irrelevant in instantiation declarations.)
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
65d237984a958c83908dae800b637a4e0efe0dd2.cu
#include "../../../cuda/CUDADevicesService.hpp"
#include "../../../datatransfer/DataTransfer.cuh"
#include "../LevenbergMarquardtFletcher/CudaStepVariables.cuh"
#include "../LevenbergMarquardtFletcher/CudaCalculateAdjustedRsquareValue.cuh"
#include "../../../models/cuda/Statics.cuh"
#include "../../../models/cuda/Model.cuh"
#include "../ErrorHandling.cuh"
#include "../MemoryHandling.cuh"
#include <cuda_runtime.h>

#include "CudaLevenbergMarquardtCore.cu"

// One thread per data row (= one voxel's echo train). Each thread runs a full
// Levenberg-Marquardt fit and writes parametersCount + 1 values (fitted
// parameters followed by an adjusted-R^2 goodness-of-fit) to `output`.
// Per-thread scratch comes either from dynamic shared memory
// (USE_SHARED_MEMORY) or from the `artifacts` global buffer; its layout is
// [parameters | y_hat | dydp | alpha | beta | temp], sharedMemoryPerThread
// NTCALC elements in total (computed by the host wrapper below).
template<typename NTOUTPUT, typename NTCALC>
__global__ void CudaLevenbergMarquardt(
    short startModelFunctionID,
    short modelFunctionID,
    short residualWeightingFunctionID,
    short alphaWeightingFunctionID,
    int parametersCount,
    int columnCount,
    int rowCount,
    int echoesCount,
    int sharedMemoryPerThread,
    NTCALC* weights,
    NTCALC* x_values,
    NTCALC* artifacts,
    NTOUTPUT* output,
    NTCALC EPSILON_1,
    NTCALC EPSILON_2)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index >= rowCount)
        return;

#ifdef USE_SHARED_MEMORY
    extern __shared__ NTCALC sharedMemory[];
    NTCALC* threadSharedMemory = sharedMemory + threadIdx.x * sharedMemoryPerThread;
    NTCALC* parameters = threadSharedMemory;
#else
    NTCALC* parameters = artifacts + index * sharedMemoryPerThread;
#endif

    // Carve the per-thread scratch buffer into the step-variable views.
    CudaStepVariables<NTCALC> stepVariables;
    stepVariables.y_hat = parameters + parametersCount;
    stepVariables.dydp  = stepVariables.y_hat + echoesCount;
    stepVariables.alpha = stepVariables.dydp + echoesCount * parametersCount;
    stepVariables.beta  = stepVariables.alpha + parametersCount * parametersCount;

    // Parameters start value (model-specific initial guess).
    cudaModelFunction(
        startModelFunctionID,
        x_values, (NTCALC*)NULL, echoesCount,
        parameters, parametersCount,
        (NTCALC*)NULL, 0);
    __syncthreads();

    // Parameter boundaries live in constant memory at offsets 7 + 2*p
    // (min, max pairs); see constBuilder in the host wrapper.
    // Renamed the loop variables below from `index` — they shadowed the
    // thread index computed above.
    NTCALC parameters_min[3];
    NTCALC parameters_max[3];
    for(int p = 0; p < parametersCount; ++p)
    {
        parameters_min[p] = processConstants[7 + p * 2];
        parameters_max[p] = processConstants[7 + p * 2 + 1];
    }
    NTCALC parameters_try[3];
    NTCALC delta_p[3];

    // Column `echoesCount` of the restructured texture row holds the slice
    // index, used to select this voxel's weight vector.
    int sliceIndex = (int)tex2D(floatTexture, echoesCount, index);

    CudaLevenbergMarquardtCore(
        modelFunctionID,
        residualWeightingFunctionID,
        alphaWeightingFunctionID,
        columnCount, rowCount, echoesCount,
        weights + sliceIndex * echoesCount,
        x_values,
        stepVariables.beta + parametersCount,   // temp area follows beta
        parameters, parametersCount,
        parameters_min, parameters_max,
        delta_p, parameters_try,
        &stepVariables,
        EPSILON_1, EPSILON_2);
    __syncthreads();

    // p1 (T1/T2)
    output[index * (parametersCount + 1)] = (NTOUTPUT)parameters[0];
    // p2 (M0)
    output[index * (parametersCount + 1) + 1] = (NTOUTPUT)parameters[1];
    // p3 (FA)
    if(parametersCount == 3)
        output[index * (parametersCount + 1) + 2] = (NTOUTPUT)parameters[2];

    // GOF: skip when the fit left every parameter at exactly 1
    // (the "no fit happened" sentinel).
    bool calc = false;
    for(int p = 0; p < parametersCount; ++p)
        if(parameters[p] != 1)
        {
            calc = true;
            break;
        }
    output[index * (parametersCount + 1) + parametersCount] = calc
        ? (NTOUTPUT)CudaCalculateAdjustedRsquareValue(
              stepVariables.y_hat, echoesCount, parametersCount)
        : 0;
}

// Host driver: filters out below-threshold voxels, restructures the surviving
// data into texture rows of [echoes... | sliceIndex | (FA) | (T1)], uploads
// constants, and launches CudaLevenbergMarquardt in maxTexture2DHeight-sized
// chunks. Returns a new[]-allocated array of dataLength * (parametersCount+1)
// outputs that the caller owns.
template<typename NTINPUT, typename NTOUTPUT, typename NTCALC>
NTOUTPUT* CudaProcessLevenbergMarquardt(
    short startModelFunctionID,
    short modelFunctionID,
    short residualWeightingFunctionID,
    short alphaWeightingFunctionID,
    int parametersCount,
    int startIndex,
    int endIndex,
    int columnCount,
    int rowCount,
    int sliceCount,
    NTCALC* echotimes,
    NTCALC* weights,
    NTINPUT* data,
    NTCALC threshold,
    NTCALC* constants,
    int constantsLength,
    NTCALC* parameterBoundaries,
    bool needsFlipAnglesData,
    bool needsT1Data,
    int threadCount,
    NTCALC EPSILON_1,
    NTCALC EPSILON_2)
{
    int sliceSize = columnCount * rowCount;
    int dataLength = sliceSize * sliceCount;
    int echoesCount = endIndex - startIndex + 1;

    // Mark valid data. Fixed: this was a runtime-sized stack array (a
    // non-standard VLA); dataLength is columns*rows*slices and easily
    // overflows the stack for real volumes, so allocate on the heap.
    bool* validData = new bool[dataLength];
    int validDataLength = MarkValidData(data, validData, dataLength, threshold);

    int restructeredColumnsCount = echoesCount + 1; // +1 for sliceIndex
    if(needsFlipAnglesData)
        ++restructeredColumnsCount;
    if(needsT1Data)
        ++restructeredColumnsCount;

    // Restructure data (valid voxels only, row-per-voxel layout).
    NTCALC* restructeredData = new NTCALC[validDataLength * restructeredColumnsCount];
    RestructureDataForward(
        data,
        needsFlipAnglesData ? constants + 7 : (float*)NULL,
        needsT1Data ? constants + 7 + sliceSize : (float*)NULL,
        validData, dataLength, sliceSize, echoesCount,
        restructeredData);

    // Split the work so each kernel call fits in one 2D texture.
    int maxTexture2DHeight = CUDADevicesService::getMaximumTexture2DHeight();
    int kernelCalls = validDataLength / maxTexture2DHeight;
    if(validDataLength % maxTexture2DHeight != 0)
        ++kernelCalls;
    int restructeredRowsCount = min(maxTexture2DHeight, validDataLength);

    // processConstants layout: [0..6] general constants,
    // [7..12] parameter (min,max) pairs, [13..] echo times.
    // NOTE(review): the fixed offset 13 assumes parametersCount <= 3 and
    // echoesCount <= 250 (263 - 13) — confirm against callers.
    float constBuilder[263] = {};
    for(int index = 0; index < 7; ++index)
        constBuilder[index] = constants[index];
    for(int index = 0; index < parametersCount * 2; ++index)
        constBuilder[index + 7] = parameterBoundaries[index];
    for(int index = 0; index < echoesCount; ++index)
        constBuilder[index + 13] = echotimes[index];
    HANDLE_CUDA_ERROR(cudaMemcpyToSymbol(processConstants, constBuilder,
        263 * sizeof(float), 0, cudaMemcpyHostToDevice));

    NTCALC* gpu_x_values = NULL; //processConstants + 13; //AllocAndCopyToDevice(echotimes, echoesCount);
    NTCALC* gpu_weights = AllocAndCopyToDevice(weights, echoesCount * sliceCount);
    NTOUTPUT* gpu_output = AllocOnDevice<NTOUTPUT>(validDataLength * (parametersCount + 1));

    // CudaArray + Texture for restructered data
    cudaChannelFormatDesc channelFormatDesc = cudaCreateChannelDesc<NTCALC>();
    cudaExtent restructeredDataExtent =
        make_cudaExtent(restructeredColumnsCount, restructeredRowsCount, 0);
    cudaArray* dataArray = NULL;
    HANDLE_CUDA_ERROR(cudaMalloc3DArray(
        &dataArray, &channelFormatDesc, restructeredDataExtent, cudaArrayDefault));
    //TODO on better gpus -> for now global memory

    // Per-thread scratch size (NTCALC elements) — must match the kernel's
    // carve-up above.
    int sharedMemoryPerThread =
        parametersCount +                   //params
        echoesCount +                       //y_hat
        echoesCount * parametersCount +     //dydp
        parametersCount * parametersCount + //alpha
        parametersCount +                   //beta
        echoesCount;                        //temp

#ifdef USE_SHARED_MEMORY
    int sharedMemoryPerThreadBytes = sharedMemoryPerThread * sizeof(NTCALC);
    int availableSharedMemoryPerBlock = CUDADevicesService::getSharedMemoryPerBlock();
    // Fixed: `min(..., int threadCount)` — a stray type keyword inside the
    // call made this branch fail to compile.
    int threadsPerBlock = min(
        (int)(availableSharedMemoryPerBlock / sharedMemoryPerThreadBytes),
        threadCount);
#else
    NTCALC* gpu_artifacts = AllocOnDevice<NTCALC>(sharedMemoryPerThread * restructeredRowsCount);
    int threadsPerBlock = threadCount;
#endif

    for(int kernelIndex = 0; kernelIndex < kernelCalls; ++kernelIndex)
    {
        int remainingValidDataLength = validDataLength - kernelIndex * restructeredRowsCount;
        int callRowsCount = min(restructeredRowsCount, remainingValidDataLength);

        HANDLE_CUDA_ERROR(cudaMemcpyToArray(dataArray, 0, 0,
            restructeredData + kernelIndex * restructeredColumnsCount * restructeredRowsCount,
            restructeredColumnsCount * sizeof(NTCALC) * callRowsCount,
            cudaMemcpyHostToDevice));
        HANDLE_CUDA_ERROR(
            cudaBindTextureToArray(floatTexture, dataArray, channelFormatDesc));

        int blockCount = (callRowsCount + threadsPerBlock - 1) / threadsPerBlock;
#ifdef USE_SHARED_MEMORY
        CudaLevenbergMarquardt<<<blockCount, threadsPerBlock, sharedMemoryPerThreadBytes>>>(
            startModelFunctionID, modelFunctionID,
            residualWeightingFunctionID, alphaWeightingFunctionID,
            parametersCount, restructeredColumnsCount, restructeredRowsCount,
            echoesCount, sharedMemoryPerThread,
            gpu_weights, gpu_x_values, (NTCALC*)NULL,
            gpu_output + kernelIndex * restructeredRowsCount * (parametersCount + 1),
            EPSILON_1, EPSILON_2);
#else
        CudaLevenbergMarquardt<<<blockCount, threadsPerBlock>>>(
            startModelFunctionID, modelFunctionID,
            residualWeightingFunctionID, alphaWeightingFunctionID,
            parametersCount, restructeredColumnsCount, restructeredRowsCount,
            echoesCount, sharedMemoryPerThread,
            gpu_weights, gpu_x_values, gpu_artifacts,
            gpu_output + kernelIndex * restructeredRowsCount * (parametersCount + 1),
            EPSILON_1, EPSILON_2);
#endif
        HANDLE_CUDA_ERROR(cudaGetLastError());
        HANDLE_CUDA_ERROR(cudaUnbindTexture(floatTexture));
    }

    // Scatter the compact (valid-only) results back into full-volume order.
    NTOUTPUT* output = new NTOUTPUT[dataLength * (parametersCount + 1)];
    NTOUTPUT* restructeredOutput =
        CopyFromDeviceAndFree(gpu_output, validDataLength * (parametersCount + 1));
    HANDLE_CUDA_ERROR(cudaGetLastError());
    RestructureDataBackward(output, validData, dataLength,
        restructeredOutput, parametersCount, true);

    // Fixed: restructeredData was allocated with new[] but released with
    // free(), which is undefined behavior — must be delete[].
    delete[] restructeredData;
    // NOTE(review): free() here assumes CopyFromDeviceAndFree allocates with
    // malloc — confirm in MemoryHandling.cuh.
    free(restructeredOutput);
    delete[] validData;
#ifndef USE_SHARED_MEMORY
    cudaFree(gpu_artifacts);
#endif
    cudaFree(gpu_weights);
    //cudaFree(gpu_x_values);
    cudaFreeArray(dataArray);
    return output;
}

// Explicit instantiations: NTINPUT in {short, float, unsigned short} crossed
// with NTOUTPUT in {short, float, unsigned short}; NTCALC is always float.
// (Parameter names omitted — irrelevant in instantiation declarations.)
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, short*, float, float*, int, float*, bool, bool, int, float, float);
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, float*, float, float*, int, float*, bool, bool, int, float, float);
template short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
template float* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
template unsigned short* CudaProcessLevenbergMarquardt(short, short, short, short, int, int, int, int, int, int, float*, float*, unsigned short*, float, float*, int, float*, bool, bool, int, float, float);
2017f453ad00c7b521f664579c57f941e54509d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/limits.hpp" using namespace cv::gpu; using namespace cv::gpu::cudev; //////////////////////////////////////////////////////////// // centeredGradient namespace tvl1flow { __global__ void centeredGradientKernel(const PtrStepSzf src, PtrStepf dx, PtrStepf dy) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; dx(y, x) = 0.5f * (src(y, ::min(x + 1, src.cols - 1)) - src(y, ::max(x - 1, 0))); dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); hipLaunchKernelGGL(( centeredGradientKernel), dim3(grid), dim3(block), 0, 0, src, dx, dy); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // warpBackward namespace tvl1flow { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } texture<float, hipTextureType2D, 
hipReadModeElementType> tex_I1 (false, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_I1x(false, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> tex_I1y(false, hipFilterModePoint, hipAddressModeClamp); __global__ void warpBackwardKernel(const PtrStepSzf I0, const PtrStepf u1, const PtrStepf u2, PtrStepf I1w, PtrStepf I1wx, PtrStepf I1wy, PtrStepf grad, PtrStepf rho) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I0.cols || y >= I0.rows) return; const float u1Val = u1(y, x); const float u2Val = u2(y, x); const float wx = x + u1Val; const float wy = y + u2Val; const int xmin = ::ceilf(wx - 2.0f); const int xmax = ::floorf(wx + 2.0f); const int ymin = ::ceilf(wy - 2.0f); const int ymax = ::floorf(wy + 2.0f); float sum = 0.0f; float sumx = 0.0f; float sumy = 0.0f; float wsum = 0.0f; for (int cy = ymin; cy <= ymax; ++cy) { for (int cx = xmin; cx <= xmax; ++cx) { const float w = bicubicCoeff(wx - cx) * bicubicCoeff(wy - cy); sum += w * tex2D(tex_I1 , cx, cy); sumx += w * tex2D(tex_I1x, cx, cy); sumy += w * tex2D(tex_I1y, cx, cy); wsum += w; } } const float coeff = 1.0f / wsum; const float I1wVal = sum * coeff; const float I1wxVal = sumx * coeff; const float I1wyVal = sumy * coeff; I1w(y, x) = I1wVal; I1wx(y, x) = I1wxVal; I1wy(y, x) = I1wyVal; const float Ix2 = I1wxVal * I1wxVal; const float Iy2 = I1wyVal * I1wyVal; // store the |Grad(I1)|^2 grad(y, x) = Ix2 + Iy2; // compute the constant part of the rho function const float I0Val = I0(y, x); rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); 
bindTexture(&tex_I1 , I1); bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); hipLaunchKernelGGL(( warpBackwardKernel), dim3(grid), dim3(block), 0, 0, I0, u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateU namespace tvl1flow { __device__ float divergence(const PtrStepf& v1, const PtrStepf& v2, int y, int x) { if (x > 0 && y > 0) { const float v1x = v1(y, x) - v1(y, x - 1); const float v2y = v2(y, x) - v2(y - 1, x); return v1x + v2y; } else { if (y > 0) return v1(y, 0) + v2(y, 0) - v2(y - 1, 0); else { if (x > 0) return v1(0, x) - v1(0, x - 1) + v2(0, x); else return v1(0, 0) + v2(0, 0); } } } __global__ void estimateUKernel(const PtrStepSzf I1wx, const PtrStepf I1wy, const PtrStepf grad, const PtrStepf rho_c, const PtrStepf p11, const PtrStepf p12, const PtrStepf p21, const PtrStepf p22, PtrStepf u1, PtrStepf u2, PtrStepf error, const float l_t, const float theta) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I1wx.cols || y >= I1wx.rows) return; const float I1wxVal = I1wx(y, x); const float I1wyVal = I1wy(y, x); const float gradVal = grad(y, x); const float u1OldVal = u1(y, x); const float u2OldVal = u2(y, x); const float rho = rho_c(y, x) + (I1wxVal * u1OldVal + I1wyVal * u2OldVal); // estimate the values of the variable (v1, v2) (thresholding operator TH) float d1 = 0.0f; float d2 = 0.0f; if (rho < -l_t * gradVal) { d1 = l_t * I1wxVal; d2 = l_t * I1wyVal; } else if (rho > l_t * gradVal) { d1 = -l_t * I1wxVal; d2 = -l_t * I1wyVal; } else if (gradVal > numeric_limits<float>::epsilon()) { const float fi = -rho / gradVal; d1 = fi * I1wxVal; d2 = fi * I1wyVal; } const float v1 = u1OldVal + d1; const float v2 = u2OldVal + d2; // compute the divergence of the dual variable (p1, p2) const float div_p1 = divergence(p11, p12, y, x); const float div_p2 = 
divergence(p21, p22, y, x); // estimate the values of the optical flow (u1, u2) const float u1NewVal = v1 + theta * div_p1; const float u2NewVal = v2 + theta * div_p2; u1(y, x) = u1NewVal; u2(y, x) = u2NewVal; const float n1 = (u1OldVal - u1NewVal) * (u1OldVal - u1NewVal); const float n2 = (u2OldVal - u2NewVal) * (u2OldVal - u2NewVal); error(y, x) = n1 + n2; } void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error, float l_t, float theta) { const dim3 block(32, 8); const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); hipLaunchKernelGGL(( estimateUKernel), dim3(grid), dim3(block), 0, 0, I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, error, l_t, theta); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateDualVariables namespace tvl1flow { __global__ void estimateDualVariablesKernel(const PtrStepSzf u1, const PtrStepf u2, PtrStepf p11, PtrStepf p12, PtrStepf p21, PtrStepf p22, const float taut) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= u1.cols || y >= u1.rows) return; const float u1x = u1(y, ::min(x + 1, u1.cols - 1)) - u1(y, x); const float u1y = u1(::min(y + 1, u1.rows - 1), x) - u1(y, x); const float u2x = u2(y, ::min(x + 1, u1.cols - 1)) - u2(y, x); const float u2y = u2(::min(y + 1, u1.rows - 1), x) - u2(y, x); const float g1 = ::hypotf(u1x, u1y); const float g2 = ::hypotf(u2x, u2y); const float ng1 = 1.0f + taut * g1; const float ng2 = 1.0f + taut * g2; p11(y, x) = (p11(y, x) + taut * u1x) / ng1; p12(y, x) = (p12(y, x) + taut * u1y) / ng1; p21(y, x) = (p21(y, x) + taut * u2x) / ng2; p22(y, x) = (p22(y, x) + taut * u2y) / ng2; } void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, 
PtrStepSzf p22, float taut) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), divUp(u1.rows, block.y)); hipLaunchKernelGGL(( estimateDualVariablesKernel), dim3(grid), dim3(block), 0, 0, u1, u2, p11, p12, p21, p22, taut); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } } #endif // !defined CUDA_DISABLER
2017f453ad00c7b521f664579c57f941e54509d3.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/limits.hpp" using namespace cv::gpu; using namespace cv::gpu::cudev; //////////////////////////////////////////////////////////// // centeredGradient namespace tvl1flow { __global__ void centeredGradientKernel(const PtrStepSzf src, PtrStepf dx, PtrStepf dy) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= src.cols || y >= src.rows) return; dx(y, x) = 0.5f * (src(y, ::min(x + 1, src.cols - 1)) - src(y, ::max(x - 1, 0))); dy(y, x) = 0.5f * (src(::min(y + 1, src.rows - 1), x) - src(::max(y - 1, 0), x)); } void centeredGradient(PtrStepSzf src, PtrStepSzf dx, PtrStepSzf dy) { const dim3 block(32, 8); const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y)); centeredGradientKernel<<<grid, block>>>(src, dx, dy); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // warpBackward namespace tvl1flow { static __device__ __forceinline__ float bicubicCoeff(float x_) { float x = fabsf(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1 (false, 
cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1x(false, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> tex_I1y(false, cudaFilterModePoint, cudaAddressModeClamp); __global__ void warpBackwardKernel(const PtrStepSzf I0, const PtrStepf u1, const PtrStepf u2, PtrStepf I1w, PtrStepf I1wx, PtrStepf I1wy, PtrStepf grad, PtrStepf rho) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I0.cols || y >= I0.rows) return; const float u1Val = u1(y, x); const float u2Val = u2(y, x); const float wx = x + u1Val; const float wy = y + u2Val; const int xmin = ::ceilf(wx - 2.0f); const int xmax = ::floorf(wx + 2.0f); const int ymin = ::ceilf(wy - 2.0f); const int ymax = ::floorf(wy + 2.0f); float sum = 0.0f; float sumx = 0.0f; float sumy = 0.0f; float wsum = 0.0f; for (int cy = ymin; cy <= ymax; ++cy) { for (int cx = xmin; cx <= xmax; ++cx) { const float w = bicubicCoeff(wx - cx) * bicubicCoeff(wy - cy); sum += w * tex2D(tex_I1 , cx, cy); sumx += w * tex2D(tex_I1x, cx, cy); sumy += w * tex2D(tex_I1y, cx, cy); wsum += w; } } const float coeff = 1.0f / wsum; const float I1wVal = sum * coeff; const float I1wxVal = sumx * coeff; const float I1wyVal = sumy * coeff; I1w(y, x) = I1wVal; I1wx(y, x) = I1wxVal; I1wy(y, x) = I1wyVal; const float Ix2 = I1wxVal * I1wxVal; const float Iy2 = I1wyVal * I1wyVal; // store the |Grad(I1)|^2 grad(y, x) = Ix2 + Iy2; // compute the constant part of the rho function const float I0Val = I0(y, x); rho(y, x) = I1wVal - I1wxVal * u1Val - I1wyVal * u2Val - I0Val; } void warpBackward(PtrStepSzf I0, PtrStepSzf I1, PtrStepSzf I1x, PtrStepSzf I1y, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf I1w, PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho) { const dim3 block(32, 8); const dim3 grid(divUp(I0.cols, block.x), divUp(I0.rows, block.y)); bindTexture(&tex_I1 , I1); 
bindTexture(&tex_I1x, I1x); bindTexture(&tex_I1y, I1y); warpBackwardKernel<<<grid, block>>>(I0, u1, u2, I1w, I1wx, I1wy, grad, rho); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateU namespace tvl1flow { __device__ float divergence(const PtrStepf& v1, const PtrStepf& v2, int y, int x) { if (x > 0 && y > 0) { const float v1x = v1(y, x) - v1(y, x - 1); const float v2y = v2(y, x) - v2(y - 1, x); return v1x + v2y; } else { if (y > 0) return v1(y, 0) + v2(y, 0) - v2(y - 1, 0); else { if (x > 0) return v1(0, x) - v1(0, x - 1) + v2(0, x); else return v1(0, 0) + v2(0, 0); } } } __global__ void estimateUKernel(const PtrStepSzf I1wx, const PtrStepf I1wy, const PtrStepf grad, const PtrStepf rho_c, const PtrStepf p11, const PtrStepf p12, const PtrStepf p21, const PtrStepf p22, PtrStepf u1, PtrStepf u2, PtrStepf error, const float l_t, const float theta) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= I1wx.cols || y >= I1wx.rows) return; const float I1wxVal = I1wx(y, x); const float I1wyVal = I1wy(y, x); const float gradVal = grad(y, x); const float u1OldVal = u1(y, x); const float u2OldVal = u2(y, x); const float rho = rho_c(y, x) + (I1wxVal * u1OldVal + I1wyVal * u2OldVal); // estimate the values of the variable (v1, v2) (thresholding operator TH) float d1 = 0.0f; float d2 = 0.0f; if (rho < -l_t * gradVal) { d1 = l_t * I1wxVal; d2 = l_t * I1wyVal; } else if (rho > l_t * gradVal) { d1 = -l_t * I1wxVal; d2 = -l_t * I1wyVal; } else if (gradVal > numeric_limits<float>::epsilon()) { const float fi = -rho / gradVal; d1 = fi * I1wxVal; d2 = fi * I1wyVal; } const float v1 = u1OldVal + d1; const float v2 = u2OldVal + d2; // compute the divergence of the dual variable (p1, p2) const float div_p1 = divergence(p11, p12, y, x); const float div_p2 = divergence(p21, p22, y, x); // estimate the values of the optical flow 
(u1, u2) const float u1NewVal = v1 + theta * div_p1; const float u2NewVal = v2 + theta * div_p2; u1(y, x) = u1NewVal; u2(y, x) = u2NewVal; const float n1 = (u1OldVal - u1NewVal) * (u1OldVal - u1NewVal); const float n2 = (u2OldVal - u2NewVal) * (u2OldVal - u2NewVal); error(y, x) = n1 + n2; } void estimateU(PtrStepSzf I1wx, PtrStepSzf I1wy, PtrStepSzf grad, PtrStepSzf rho_c, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf error, float l_t, float theta) { const dim3 block(32, 8); const dim3 grid(divUp(I1wx.cols, block.x), divUp(I1wx.rows, block.y)); estimateUKernel<<<grid, block>>>(I1wx, I1wy, grad, rho_c, p11, p12, p21, p22, u1, u2, error, l_t, theta); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } //////////////////////////////////////////////////////////// // estimateDualVariables namespace tvl1flow { __global__ void estimateDualVariablesKernel(const PtrStepSzf u1, const PtrStepf u2, PtrStepf p11, PtrStepf p12, PtrStepf p21, PtrStepf p22, const float taut) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= u1.cols || y >= u1.rows) return; const float u1x = u1(y, ::min(x + 1, u1.cols - 1)) - u1(y, x); const float u1y = u1(::min(y + 1, u1.rows - 1), x) - u1(y, x); const float u2x = u2(y, ::min(x + 1, u1.cols - 1)) - u2(y, x); const float u2y = u2(::min(y + 1, u1.rows - 1), x) - u2(y, x); const float g1 = ::hypotf(u1x, u1y); const float g2 = ::hypotf(u2x, u2y); const float ng1 = 1.0f + taut * g1; const float ng2 = 1.0f + taut * g2; p11(y, x) = (p11(y, x) + taut * u1x) / ng1; p12(y, x) = (p12(y, x) + taut * u1y) / ng1; p21(y, x) = (p21(y, x) + taut * u2x) / ng2; p22(y, x) = (p22(y, x) + taut * u2y) / ng2; } void estimateDualVariables(PtrStepSzf u1, PtrStepSzf u2, PtrStepSzf p11, PtrStepSzf p12, PtrStepSzf p21, PtrStepSzf p22, float taut) { const dim3 block(32, 8); const dim3 grid(divUp(u1.cols, block.x), 
divUp(u1.rows, block.y)); estimateDualVariablesKernel<<<grid, block>>>(u1, u2, p11, p12, p21, p22, taut); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } } #endif // !defined CUDA_DISABLER