hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
26bbbbfc5c6c41abb451b80d99cf6cefc883b0c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _NINLJ_CHECK_H_ #define _NINLJ_CHECK_H_ #ifndef SHARED_MEM __global__ void gpuNLJ_kernel(int* d_temp, Record* d_shared_s, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { //__shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; Record* shared_s; shared_s = d_shared_s + blockIdx.y*blockDim.x*NLJ_S_BLOCK_SIZE+blockIdx.x*NLJ_S_BLOCK_SIZE; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; d_temp[i] = d_R[i].x; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } #endif #ifndef COALESCED __global__ void gpuNLJ_noCoalesced_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; (i) < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; 
for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } #endif //best with shared memory , with coaesced __global__ void gpuNLJ_kernel(int* d_temp, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; (i) < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; d_temp[i] = d_R[i].x; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } #ifndef SHARED_MEM __global__ void write(Record* d_shared_s, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { //__shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; Record* shared_s; shared_s = d_shared_s + blockIdx.y*blockDim.x*NLJ_S_BLOCK_SIZE+blockIdx.x*NLJ_S_BLOCK_SIZE;; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } 
__syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif #ifndef COALESCED __global__ void write_noCoalesced(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); int numThread = blockDim.x; int len = rLen/numThread; int start = len*threadIdx.x; int end = start + len; if( threadIdx.x == numThread - 1 ) { end = rLen; } //for(i = tid; i < rLen; i=i+NLJ_R_BLOCK_SIZE) for( i = start; i < end; i++ ) { rTmp=d_R[i]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif __global__ void write(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; 
int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; i < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } __global__ void matchCount_kernel(Record *R, Record *S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record match_ss[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; Record sTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { match_ss[tid]=S[sStart+tid]; } else { match_ss[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { sTmp=match_ss[j]; if(sTmp.y!=-1) { if((sTmp.x<=rTmp.x && rTmp.x<=sTmp.y) || (rTmp.x<=sTmp.x && sTmp.x<=rTmp.y)) { numResult++; //printf("S%d, %d, %d, R%d,%d, %d\n", sStart+j, sTmp.x, sTmp.y, i+tid, rTmp.x, rTmp.y); } } } } __syncthreads(); d_n[resultID]=numResult; } __global__ void matchWrite_kernel(Record *R, Record *S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record match_ss[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; Record sTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { match_ss[tid]=S[sStart+tid]; } else { match_ss[tid].y=-1; 
} __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { sTmp=match_ss[j]; if(sTmp.y!=-1) { if((sTmp.x<=rTmp.x && rTmp.x<=sTmp.y) || (rTmp.x<=sTmp.x && sTmp.x<=rTmp.y)) { output[base].x=i+tid; output[base].y=sStart+j; base++; } } } } __syncthreads(); } } //use constant memory __global__ void gpuNLJ_Constant_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { shared_s[tid]=d_S[sStart+tid]; } else { shared_s[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } __global__ void write_Constant_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { shared_s[tid]=d_S[sStart+tid]; } else { shared_s[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif
26bbbbfc5c6c41abb451b80d99cf6cefc883b0c3.cu
#ifndef _NINLJ_CHECK_H_ #define _NINLJ_CHECK_H_ #ifndef SHARED_MEM __global__ void gpuNLJ_kernel(int* d_temp, Record* d_shared_s, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { //__shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; Record* shared_s; shared_s = d_shared_s + blockIdx.y*blockDim.x*NLJ_S_BLOCK_SIZE+blockIdx.x*NLJ_S_BLOCK_SIZE; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; d_temp[i] = d_R[i].x; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } #endif #ifndef COALESCED __global__ void gpuNLJ_noCoalesced_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; (i) < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } 
__syncthreads(); d_n[resultID]=numResult; } #endif //best with shared memory , with coaesced __global__ void gpuNLJ_kernel(int* d_temp, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; (i) < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; d_temp[i] = d_R[i].x; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } #ifndef SHARED_MEM __global__ void write(Record* d_shared_s, Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { //__shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; Record* shared_s; shared_s = d_shared_s + blockIdx.y*blockDim.x*NLJ_S_BLOCK_SIZE+blockIdx.x*NLJ_S_BLOCK_SIZE;; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; 
for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif #ifndef COALESCED __global__ void write_noCoalesced(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); int numThread = blockDim.x; int len = rLen/numThread; int start = len*threadIdx.x; int end = start + len; if( threadIdx.x == numThread - 1 ) { end = rLen; } //for(i = tid; i < rLen; i=i+NLJ_R_BLOCK_SIZE) for( i = start; i < end; i++ ) { rTmp=d_R[i]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif __global__ void write(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; int curPosInShared=0; for(i=0;i<NLJ_NUM_TUPLE_PER_THREAD;i++) { 
curPosInShared=tid+NLJ_NUM_THREADS_PER_BLOCK*i; if((curPosInShared+sStart)<sLen) shared_s[curPosInShared]=d_S[(curPosInShared+sStart)]; else shared_s[curPosInShared].y=-1; } __syncthreads(); for(i = tid; i < rLen; i=i+NLJ_R_BLOCK_SIZE) { rTmp=d_R[i]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } __global__ void matchCount_kernel(Record *R, Record *S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record match_ss[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; Record sTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { match_ss[tid]=S[sStart+tid]; } else { match_ss[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { sTmp=match_ss[j]; if(sTmp.y!=-1) { if((sTmp.x<=rTmp.x && rTmp.x<=sTmp.y) || (rTmp.x<=sTmp.x && sTmp.x<=rTmp.y)) { numResult++; //printf("S%d, %d, %d, R%d,%d, %d\n", sStart+j, sTmp.x, sTmp.y, i+tid, rTmp.x, rTmp.y); } } } } __syncthreads(); d_n[resultID]=numResult; } __global__ void matchWrite_kernel(Record *R, Record *S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record match_ss[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; Record sTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { match_ss[tid]=S[sStart+tid]; } else { match_ss[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; 
i=i+NLJ_R_BLOCK_SIZE) { rTmp=R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { sTmp=match_ss[j]; if(sTmp.y!=-1) { if((sTmp.x<=rTmp.x && rTmp.x<=sTmp.y) || (rTmp.x<=sTmp.x && sTmp.x<=rTmp.y)) { output[base].x=i+tid; output[base].y=sStart+j; base++; } } } } __syncthreads(); } } //use constant memory __global__ void gpuNLJ_Constant_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_n) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; int numResult=0; Record rTmp; sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { shared_s[tid]=d_S[sStart+tid]; } else { shared_s[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { numResult++; } } } __syncthreads(); d_n[resultID]=numResult; } __global__ void write_Constant_kernel(Record *d_R, Record *d_S, int sStart, int rLen, int sLen, int *d_sum, Record *output) { __shared__ Record shared_s[NLJ_S_BLOCK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bid=bx+by*gridDim.x; int tid=tx+ty*blockDim.x; int resultID=(bid)*NLJ_NUM_THREADS_PER_BLOCK+tid; int j=0; int i=0; Record rTmp; int base=d_sum[resultID]; //if(d_sum[bstartSum]!=d_sum[bendSum]) //for(int sg=0;sg<NLJ_NUM_GRID_S;sg++) { sStart+=bid*NLJ_S_BLOCK_SIZE; if(sStart+tid<sLen) { shared_s[tid]=d_S[sStart+tid]; } else { shared_s[tid].y=-1; } __syncthreads(); for(i = 0; (i+tid) < rLen; i=i+NLJ_R_BLOCK_SIZE) { //printf("%d, ", i); rTmp=d_R[i+tid]; for(j=0;j<NLJ_S_BLOCK_SIZE;j++) { if(PRED_EQUAL(rTmp.y, shared_s[j].y)) { output[base].x=rTmp.x; output[base].y=shared_s[j].x; base++; } } } __syncthreads(); } } #endif
3dcac7f78b4a46724cf648251bc3b8176ffa7639.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 12.06.2019 // #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <ops/declarable/helpers/prefix.h> #include <ops/ops.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL static void prefixPerBlockCuda(scalar::Ops op, const void* vx, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, void* vz, const sd::LongType* zTadShapeInfo, const sd::LongType* zTadOffsets, const sd::LongType numTads, const sd::LongType tadLen, const bool exclusive, const bool reverse) { __shared__ T *shared, lastElemInChunk; __shared__ sd::Unsigned numTadChunks, blockDim2; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shared = reinterpret_cast<T*>(shmem); blockDim2 = 2 * blockDim.x; numTadChunks = (tadLen + blockDim2 - 1) / blockDim2; // ceil } __syncthreads(); const auto xTad = reinterpret_cast<const T*>(vx) + xTadOffsets[blockIdx.x]; auto zTad = 
reinterpret_cast<T*>(vz) + zTadOffsets[blockIdx.x]; sd::LongType sharedInd(2 * threadIdx.x), leftArrInd, rightArrInd, step; T xLeft, xRight; for (sd::Unsigned i = 0; i < numTadChunks; ++i) { leftArrInd = sharedInd + i * blockDim2; rightArrInd = leftArrInd + 1; if (reverse) { if (rightArrInd < tadLen) { rightArrInd = tadLen - 1 - rightArrInd; leftArrInd = tadLen - 1 - leftArrInd; } else if (leftArrInd < tadLen) leftArrInd = tadLen - 1 - leftArrInd; } if (leftArrInd < tadLen) shared[sharedInd] = xLeft = xTad[shape::getIndexOffset(leftArrInd, xTadShapeInfo)]; // else // shared[sharedInd] = (op == scalar::Add) ? 0 : 1; if (rightArrInd < tadLen) shared[sharedInd + 1] = xRight = xTad[shape::getIndexOffset(rightArrInd, xTadShapeInfo)]; // else // shared[sharedInd + 1] = (op == scalar::Add) ? 0 : 1; step = 1; for (sd::Unsigned d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (threadIdx.x < d) { sd::Unsigned left = step * (sharedInd + 1) - 1; sd::Unsigned right = step * (sharedInd + 2) - 1; shared[right] = (op == scalar::Add) ? (shared[right] + shared[left]) : (shared[right] * shared[left]); } step *= 2; } if (threadIdx.x == 0) shared[blockDim2 - 1] = (op == scalar::Add) ? 0 : 1; __syncthreads(); for (sd::Unsigned d = 1; d < blockDim2; d *= 2) { step /= 2; __syncthreads(); if (threadIdx.x < d) { sd::Unsigned left = step * (sharedInd + 1) - 1; sd::Unsigned right = step * (sharedInd + 2) - 1; T temp = shared[left]; shared[left] = shared[right]; shared[right] = (op == scalar::Add) ? (shared[right] + temp) : (shared[right] * temp); } } __syncthreads(); if (leftArrInd < tadLen) { T result = shared[sharedInd]; if (!exclusive) result = (op == scalar::Add) ? result + xLeft : result * xLeft; if (i > 0) result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk; zTad[shape::getIndexOffset(leftArrInd, zTadShapeInfo)] = result; } if (rightArrInd < tadLen) { T result = shared[sharedInd + 1]; if (!exclusive) result = (op == scalar::Add) ? 
result + xRight : result * xRight; if (i > 0) result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk; if (i < numTadChunks - 1 && threadIdx.x == blockDim.x - 1) // last element in chunk lastElemInChunk = !exclusive ? result : (op == scalar::Add) ? result + xRight : result * xRight; zTad[shape::getIndexOffset(rightArrInd, zTadShapeInfo)] = result; } } } /////////////////////////////////////////////////////////////////// template <typename X> static void prefixPerBlockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t* stream, scalar::Ops op, const void* vx, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, void* vz, const sd::LongType* zTadShapeInfo, const sd::LongType* zTadOffsets, const sd::LongType numTads, const sd::LongType tadLen, const bool exclusive, const bool reverse) { hipLaunchKernelGGL(( prefixPerBlockCuda<X>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, op, vx, xTadShapeInfo, xTadOffsets, vz, zTadShapeInfo, zTadOffsets, numTads, tadLen, exclusive, reverse); } /////////////////////////////////////////////////////////////////// void prefix(sd::LaunchContext* context, scalar::Ops op, const NDArray* x, NDArray* z, const std::vector<int>& dims, bool exclusive, bool reverse) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(x->shapeInfo(), dims); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(z->shapeInfo(), dims); const sd::LongType numTads = packX.numberOfTads(); const sd::LongType tadLen = x->lengthOf() / numTads; const int threadsPerBlock = SD_MAX_NUM_THREADS / 2; const int blocksPerGrid = numTads; const int sharedMem = 2 * threadsPerBlock * x->sizeOfT() + 128; PointersManager manager(context, "prefix"); NDArray::prepareSpecialUse({z}, {x}); BUILD_SINGLE_SELECTOR(x->dataType(), prefixPerBlockCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op, x->specialBuffer(), 
packX.platformShapeInfo(), packX.platformOffsets(), z->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLen, exclusive, reverse), SD_NUMERIC_TYPES); NDArray::registerSpecialUse({z}, {x}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// void prefix(sd::LaunchContext* context, scalar::Ops op, const NDArray* x, NDArray* z, bool exclusive, bool reverse) { prefix(context, op, x, z, {}, exclusive, reverse); } } // namespace helpers } // namespace ops } // namespace sd
3dcac7f78b4a46724cf648251bc3b8176ffa7639.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma (iuriish@yahoo.com), created on 12.06.2019 // #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <ops/declarable/helpers/prefix.h> #include <ops/ops.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL static void prefixPerBlockCuda(scalar::Ops op, const void* vx, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, void* vz, const sd::LongType* zTadShapeInfo, const sd::LongType* zTadOffsets, const sd::LongType numTads, const sd::LongType tadLen, const bool exclusive, const bool reverse) { __shared__ T *shared, lastElemInChunk; __shared__ sd::Unsigned numTadChunks, blockDim2; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shared = reinterpret_cast<T*>(shmem); blockDim2 = 2 * blockDim.x; numTadChunks = (tadLen + blockDim2 - 1) / blockDim2; // ceil } __syncthreads(); const auto xTad = reinterpret_cast<const T*>(vx) + xTadOffsets[blockIdx.x]; auto zTad = reinterpret_cast<T*>(vz) + zTadOffsets[blockIdx.x]; sd::LongType sharedInd(2 * 
threadIdx.x), leftArrInd, rightArrInd, step; T xLeft, xRight; for (sd::Unsigned i = 0; i < numTadChunks; ++i) { leftArrInd = sharedInd + i * blockDim2; rightArrInd = leftArrInd + 1; if (reverse) { if (rightArrInd < tadLen) { rightArrInd = tadLen - 1 - rightArrInd; leftArrInd = tadLen - 1 - leftArrInd; } else if (leftArrInd < tadLen) leftArrInd = tadLen - 1 - leftArrInd; } if (leftArrInd < tadLen) shared[sharedInd] = xLeft = xTad[shape::getIndexOffset(leftArrInd, xTadShapeInfo)]; // else // shared[sharedInd] = (op == scalar::Add) ? 0 : 1; if (rightArrInd < tadLen) shared[sharedInd + 1] = xRight = xTad[shape::getIndexOffset(rightArrInd, xTadShapeInfo)]; // else // shared[sharedInd + 1] = (op == scalar::Add) ? 0 : 1; step = 1; for (sd::Unsigned d = blockDim.x; d > 0; d /= 2) { __syncthreads(); if (threadIdx.x < d) { sd::Unsigned left = step * (sharedInd + 1) - 1; sd::Unsigned right = step * (sharedInd + 2) - 1; shared[right] = (op == scalar::Add) ? (shared[right] + shared[left]) : (shared[right] * shared[left]); } step *= 2; } if (threadIdx.x == 0) shared[blockDim2 - 1] = (op == scalar::Add) ? 0 : 1; __syncthreads(); for (sd::Unsigned d = 1; d < blockDim2; d *= 2) { step /= 2; __syncthreads(); if (threadIdx.x < d) { sd::Unsigned left = step * (sharedInd + 1) - 1; sd::Unsigned right = step * (sharedInd + 2) - 1; T temp = shared[left]; shared[left] = shared[right]; shared[right] = (op == scalar::Add) ? (shared[right] + temp) : (shared[right] * temp); } } __syncthreads(); if (leftArrInd < tadLen) { T result = shared[sharedInd]; if (!exclusive) result = (op == scalar::Add) ? result + xLeft : result * xLeft; if (i > 0) result = (op == scalar::Add) ? result + lastElemInChunk : result * lastElemInChunk; zTad[shape::getIndexOffset(leftArrInd, zTadShapeInfo)] = result; } if (rightArrInd < tadLen) { T result = shared[sharedInd + 1]; if (!exclusive) result = (op == scalar::Add) ? result + xRight : result * xRight; if (i > 0) result = (op == scalar::Add) ? 
result + lastElemInChunk : result * lastElemInChunk; if (i < numTadChunks - 1 && threadIdx.x == blockDim.x - 1) // last element in chunk lastElemInChunk = !exclusive ? result : (op == scalar::Add) ? result + xRight : result * xRight; zTad[shape::getIndexOffset(rightArrInd, zTadShapeInfo)] = result; } } } /////////////////////////////////////////////////////////////////// template <typename X> static void prefixPerBlockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, scalar::Ops op, const void* vx, const sd::LongType* xTadShapeInfo, const sd::LongType* xTadOffsets, void* vz, const sd::LongType* zTadShapeInfo, const sd::LongType* zTadOffsets, const sd::LongType numTads, const sd::LongType tadLen, const bool exclusive, const bool reverse) { prefixPerBlockCuda<X><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>( op, vx, xTadShapeInfo, xTadOffsets, vz, zTadShapeInfo, zTadOffsets, numTads, tadLen, exclusive, reverse); } /////////////////////////////////////////////////////////////////// void prefix(sd::LaunchContext* context, scalar::Ops op, const NDArray* x, NDArray* z, const std::vector<int>& dims, bool exclusive, bool reverse) { auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(x->shapeInfo(), dims); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(z->shapeInfo(), dims); const sd::LongType numTads = packX.numberOfTads(); const sd::LongType tadLen = x->lengthOf() / numTads; const int threadsPerBlock = SD_MAX_NUM_THREADS / 2; const int blocksPerGrid = numTads; const int sharedMem = 2 * threadsPerBlock * x->sizeOfT() + 128; PointersManager manager(context, "prefix"); NDArray::prepareSpecialUse({z}, {x}); BUILD_SINGLE_SELECTOR(x->dataType(), prefixPerBlockCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op, x->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), z->specialBuffer(), packZ.platformShapeInfo(), 
packZ.platformOffsets(), numTads, tadLen, exclusive, reverse), SD_NUMERIC_TYPES); NDArray::registerSpecialUse({z}, {x}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// void prefix(sd::LaunchContext* context, scalar::Ops op, const NDArray* x, NDArray* z, bool exclusive, bool reverse) { prefix(context, op, x, z, {}, exclusive, reverse); } } // namespace helpers } // namespace ops } // namespace sd
f81e9f9b07f5db84b9dfac561ac2e14b07546b0e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <chrono> #include <random> #include <hip/hip_runtime.h> #include "kernels.h" #define nthreads 256 int main(int argc, char** argv) { if (argc != 4) { printf("Usage: %s <width> <height> <repeat>\n", argv[0]); return 1; } int width = atoi(argv[1]); int height = atoi(argv[2]); int repeat = atoi(argv[3]); size_t size = width * height; size_t size_output_bytes = size * sizeof(uint); size_t size_image_bytes = size * sizeof(float3); std::mt19937 gen(19937); // reduce the upper bound can increase the kernel execution time of eliminate_crosses std::uniform_real_distribution<float> dis(0.f, 0.4f); float3 *h_img = (float3*) malloc(size_image_bytes); uint *h_out = (uint*) malloc(size_output_bytes); float3 *d_img; hipMalloc((void**)&d_img, size_image_bytes); uint *d_tmp, *d_out; hipMalloc((void**)&d_tmp, size_output_bytes); hipMalloc((void**)&d_out, size_output_bytes); // assume that size is a multiple of nthreads dim3 grids (size / nthreads); dim3 blocks (nthreads); float sum = 0; float total_time = 0; for (int n = 0; n < repeat; n++) { for (size_t i = 0; i < size; i++) { h_img[i].x = dis(gen); h_img[i].y = dis(gen); h_img[i].z = dis(gen); } hipMemcpy(d_img, h_img, size_image_bytes, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( check_connect), dim3(grids), dim3(blocks), 0, 0, d_img, d_tmp, width, height); hipLaunchKernelGGL(( eliminate_crosses), dim3(grids), dim3(blocks), 0, 0, d_tmp, d_out, width, height); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); hipMemcpy(h_out, d_out, size_output_bytes, hipMemcpyDeviceToHost); std::chrono::duration<float> time = end - start; total_time += time.count(); float lsum = 0; for (size_t i = 0; i < size; i++) lsum += (h_out[i] & 0xff) / 256.f + ((h_out[i] >> 8) & 0xff) / 256.f + ((h_out[i] >> 16) & 0xff) / 256.f + ((h_out[i] >> 24) & 0xff) / 256.f; 
sum += lsum / size; } printf("Image size: %d (width) x %d (height)\ncheckSum: %f\n", width, height, sum); printf("Average kernel time over %d iterations: %f (s)\n", repeat, total_time / repeat); hipFree(d_out); hipFree(d_img); hipFree(d_tmp); free(h_out); free(h_img); return 0; }
f81e9f9b07f5db84b9dfac561ac2e14b07546b0e.cu
#include <stdio.h> #include <stdlib.h> #include <chrono> #include <random> #include <hip/hip_runtime.h> #include "kernels.h" #define nthreads 256 int main(int argc, char** argv) { if (argc != 4) { printf("Usage: %s <width> <height> <repeat>\n", argv[0]); return 1; } int width = atoi(argv[1]); int height = atoi(argv[2]); int repeat = atoi(argv[3]); size_t size = width * height; size_t size_output_bytes = size * sizeof(uint); size_t size_image_bytes = size * sizeof(float3); std::mt19937 gen(19937); // reduce the upper bound can increase the kernel execution time of eliminate_crosses std::uniform_real_distribution<float> dis(0.f, 0.4f); float3 *h_img = (float3*) malloc(size_image_bytes); uint *h_out = (uint*) malloc(size_output_bytes); float3 *d_img; hipMalloc((void**)&d_img, size_image_bytes); uint *d_tmp, *d_out; hipMalloc((void**)&d_tmp, size_output_bytes); hipMalloc((void**)&d_out, size_output_bytes); // assume that size is a multiple of nthreads dim3 grids (size / nthreads); dim3 blocks (nthreads); float sum = 0; float total_time = 0; for (int n = 0; n < repeat; n++) { for (size_t i = 0; i < size; i++) { h_img[i].x = dis(gen); h_img[i].y = dis(gen); h_img[i].z = dis(gen); } hipMemcpy(d_img, h_img, size_image_bytes, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); check_connect<<<grids, blocks>>>(d_img, d_tmp, width, height); eliminate_crosses<<<grids, blocks>>>(d_tmp, d_out, width, height); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); hipMemcpy(h_out, d_out, size_output_bytes, hipMemcpyDeviceToHost); std::chrono::duration<float> time = end - start; total_time += time.count(); float lsum = 0; for (size_t i = 0; i < size; i++) lsum += (h_out[i] & 0xff) / 256.f + ((h_out[i] >> 8) & 0xff) / 256.f + ((h_out[i] >> 16) & 0xff) / 256.f + ((h_out[i] >> 24) & 0xff) / 256.f; sum += lsum / size; } printf("Image size: %d (width) x %d (height)\ncheckSum: %f\n", width, height, sum); printf("Average kernel 
time over %d iterations: %f (s)\n", repeat, total_time / repeat); hipFree(d_out); hipFree(d_img); hipFree(d_tmp); free(h_out); free(h_img); return 0; }
a57fa21d53407174886567e46617b865fdf6e9cb.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/get_value.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/types.hpp> #include <strings/convert/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/transform_reduce.h> #include <map> #include <vector> namespace cudf { namespace strings { namespace detail { namespace { // duration components timeparts structure struct alignas(4) duration_component { int32_t day; //-2,147,483,648 to 2,147,483,647 int32_t subsecond; // 000000000 to 999999999 int8_t hour; // 00 to 23 int8_t minute; // 00 to 59 int8_t second; // 00 to 59 bool is_negative; // true/false }; enum class format_char_type : int8_t { literal, // literal char type passed through specifier // duration format specifier }; /** * @brief Represents a format specifier or literal from a duration format string. * * Created by the format_compiler when parsing a format string. 
*/ struct alignas(4) format_item { format_char_type item_type; // specifier or literal indicator char value; // specifier or literal value int8_t length; // item length in bytes static format_item new_specifier(char format_char, int8_t length) { return format_item{format_char_type::specifier, format_char, length}; } static format_item new_delimiter(char literal) { return format_item{format_char_type::literal, literal, 1}; } }; /** * @brief The format_compiler parses a duration format string into a vector of * format_items. * * The vector of format_items are used when parsing a string into duration * components and when formatting a string from duration components. */ struct format_compiler { std::string format; rmm::device_uvector<format_item> d_items; format_compiler(const char* format_, rmm::cuda_stream_view stream) : format(format_), d_items(0, stream) { static std::map<char, int8_t> const specifier_lengths = { {'-', -1}, // '-' if negative {'D', -1}, // 1 to 11 (not in std::format) {'H', 2}, // HH {'I', 2}, // HH {'M', 2}, // MM {'S', -1}, // 2 to 13 SS[.mmm][uuu][nnn] (uuu,nnn are not in std::format) {'p', 2}, // AM/PM {'R', 5}, // 5 HH:MM {'T', 8}, // 8 HH:MM:SS" {'r', 11} // HH:MM:SS AM/PM }; std::vector<format_item> items; const char* str = format.c_str(); auto length = format.length(); bool negative_sign{true}; while (length > 0) { char ch = *str++; length--; if (ch != '%') { items.push_back(format_item::new_delimiter(ch)); continue; } CUDF_EXPECTS(length > 0, "Unfinished specifier in duration format"); ch = *str++; length--; if (ch == '%') // escaped % char { items.push_back(format_item::new_delimiter(ch)); continue; } else if (ch == 'n') { items.push_back(format_item::new_delimiter('\n')); continue; } else if (ch == 't') { items.push_back(format_item::new_delimiter('\t')); continue; } if (ch == 'O') { CUDF_EXPECTS(*str == 'H' || *str == 'I' || *str == 'M' || *str == 'S', "locale's alternative representation not supported for specifier: " + std::string(1, 
*str)); ch = *str++; length--; items.push_back(format_item::new_specifier(ch, 2)); // without sign continue; } CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(), "invalid format specifier: " + std::string(1, ch)); // negative sign should be present only once. if (negative_sign) { if (std::string("DHIMSRT").find_first_of(ch) != std::string::npos) { items.push_back(format_item::new_specifier('-', specifier_lengths.at('-'))); negative_sign = false; } } int8_t spec_length = specifier_lengths.at(ch); items.push_back(format_item::new_specifier(ch, spec_length)); } // create program in device memory d_items.resize(items.size(), stream); CUDA_TRY(hipMemcpyAsync(d_items.data(), items.data(), items.size() * sizeof(items[0]), hipMemcpyHostToDevice, stream.value())); } format_item const* compiled_format_items() { return d_items.data(); } size_type items_count() const { return static_cast<size_type>(d_items.size()); } }; template <typename T> __device__ void dissect_duration(T duration, duration_component* timeparts) { timeparts->is_negative = (duration < T{0}); timeparts->day = cuda::std::chrono::duration_cast<duration_D>(duration).count(); if (cuda::std::is_same<T, duration_D>::value) return; duration_s seconds = cuda::std::chrono::duration_cast<duration_s>(duration); timeparts->hour = (cuda::std::chrono::duration_cast<cuda::std::chrono::hours>(seconds) % duration_D(1)).count(); timeparts->minute = (cuda::std::chrono::duration_cast<cuda::std::chrono::minutes>(seconds) % cuda::std::chrono::hours(1)) .count(); timeparts->second = (seconds % cuda::std::chrono::minutes(1)).count(); if (not cuda::std::is_same<T, duration_s>::value) { timeparts->subsecond = (duration % duration_s(1)).count(); } } template <typename T> struct duration_to_string_size_fn { const column_device_view d_durations; const format_item* d_format_items; size_type items_count; __device__ int8_t format_length(char format_char, duration_component const* const timeparts) const { switch 
(format_char) { case '-': return timeparts->is_negative; break; case 'D': return count_digits(timeparts->day) - (timeparts->day < 0); break; case 'S': return 2 + (timeparts->subsecond == 0 ? 0 : [] { if (cuda::std::is_same<T, duration_ms>::value) return 3 + 1; // +1 is for dot if (cuda::std::is_same<T, duration_us>::value) return 6 + 1; // +1 is for dot if (cuda::std::is_same<T, duration_ns>::value) return 9 + 1; // +1 is for dot return 0; }()); break; default: return 2; } } __device__ size_type operator()(size_type idx) { if (d_durations.is_null(idx)) return 0; auto duration = d_durations.element<T>(idx); duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) dissect_duration(duration, &timeparts); return thrust::transform_reduce( thrust::seq, d_format_items, d_format_items + items_count, [this, &timeparts] __device__(format_item item) -> size_type { if (item.item_type == format_char_type::literal) return 1; else if (item.length != -1) return item.length; else return format_length(item.value, &timeparts); }, size_type{0}, thrust::plus<size_type>()); } }; template <typename T> struct duration_to_string_fn : public duration_to_string_size_fn<T> { const int32_t* d_offsets; char* d_chars; using duration_to_string_size_fn<T>::d_durations; using duration_to_string_size_fn<T>::d_format_items; using duration_to_string_size_fn<T>::items_count; duration_to_string_fn(const column_device_view d_durations, const format_item* d_format_items, size_type items_count, const int32_t* d_offsets, char* d_chars) : duration_to_string_size_fn<T>{d_durations, d_format_items, items_count}, d_offsets(d_offsets), d_chars(d_chars) { } // utility to create (optionally) 0-padded integers (up to 10 chars) without negative sign. // min_digits==-1 indicates no 0-padding. 
__device__ char* int2str(char* str, int min_digits, int32_t value) { constexpr int MAX_DIGITS = 10; // largest 32-bit integer is 10 digits assert(min_digits <= MAX_DIGITS); if (value == 0) { do { *str++ = '0'; } while (--min_digits > 0); return str; } char digits[MAX_DIGITS] = {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0'}; int digits_idx = 0; while (value != 0) { assert(digits_idx < MAX_DIGITS); digits[digits_idx++] = '0' + std::abs(value % 10); // next digit value = value / 10; } digits_idx = ::max(digits_idx, min_digits); // digits are backwards, reverse the string into the output while (digits_idx-- > 0) *str++ = digits[digits_idx]; return str; } __device__ char* int_to_2digitstr(char* str, int8_t value) { assert(value >= -99 && value <= 99); value = std::abs(value); str[0] = '0' + value / 10; str[1] = '0' + value % 10; return str + 2; } inline __device__ char* day(char* ptr, duration_component const* timeparts) { return int2str(ptr, -1, timeparts->day); } inline __device__ char* hour_12(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->hour % 12); } inline __device__ char* hour_24(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->hour); } inline __device__ char* am_or_pm(char* ptr, duration_component const* timeparts) { *ptr++ = (timeparts->hour / 12 == 0 ? 
'A' : 'P'); *ptr++ = 'M'; return ptr; } inline __device__ char* minute(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->minute); } inline __device__ char* second(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->second); } inline __device__ char* subsecond(char* ptr, duration_component const* timeparts) { if (timeparts->subsecond == 0) return ptr; const int digits = duration_to_string_size_fn<T>::format_length('S', timeparts) - 3; *ptr = '.'; auto value = timeparts->subsecond; for (int idx = digits; idx > 0; idx--) { *(ptr + idx) = '0' + std::abs(value % 10); value /= 10; } return ptr + digits + 1; } __device__ char* format_from_parts(duration_component const* timeparts, char* ptr) { for (size_t idx = 0; idx < items_count; ++idx) { auto item = d_format_items[idx]; if (item.item_type == format_char_type::literal) { *ptr++ = item.value; continue; } // special logic for each specifier switch (item.value) { case 'D': // days ptr = day(ptr, timeparts); break; case '-': // - if value is negative if (timeparts->is_negative) *ptr++ = '-'; break; case 'H': // 24-hour ptr = hour_24(ptr, timeparts); break; case 'I': // 12-hour ptr = hour_12(ptr, timeparts); break; case 'M': // minute ptr = minute(ptr, timeparts); break; case 'S': // second ptr = second(ptr, timeparts); if (item.length == 2) break; case 'f': // sub-second ptr = subsecond(ptr, timeparts); break; case 'p': ptr = am_or_pm(ptr, timeparts); break; case 'R': // HH:MM 24-hour ptr = hour_24(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); break; case 'T': // HH:MM:SS 24-hour ptr = hour_24(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); *ptr++ = ':'; ptr = second(ptr, timeparts); break; case 'r': // HH:MM:SS AM/PM 12-hour ptr = hour_12(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); *ptr++ = ':'; ptr = second(ptr, timeparts); *ptr++ = ' '; ptr = am_or_pm(ptr, timeparts); break; default: // ignore 
everything else break; } } return ptr; } __device__ void operator()(size_type idx) { if (d_durations.is_null(idx)) return; auto duration = d_durations.template element<T>(idx); duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) dissect_duration(duration, &timeparts); // convert to characters format_from_parts(&timeparts, d_chars + d_offsets[idx]); } }; /** * @brief This dispatch method is for converting durations into strings. * * The template function declaration ensures only duration types are used. */ struct dispatch_from_durations_fn { template <typename T, std::enable_if_t<cudf::is_duration<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& durations, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); format_compiler compiler(format.c_str(), stream); auto d_format_items = compiler.compiled_format_items(); size_type strings_count = durations.size(); auto column = column_device_view::create(durations, stream); auto d_column = *column; // copy null mask rmm::device_buffer null_mask = cudf::detail::copy_bitmask(durations, stream, mr); // build offsets column auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0), duration_to_string_size_fn<T>{d_column, d_format_items, compiler.items_count()}); auto offsets_column = detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr); auto offsets_view = offsets_column->view(); auto d_new_offsets = offsets_view.template data<int32_t>(); // build chars column auto const chars_bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream); auto chars_column = detail::create_chars_child_column(strings_count, chars_bytes, stream, mr); auto d_chars = chars_column->mutable_view().template data<char>(); 
thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), strings_count, duration_to_string_fn<T>{ d_column, d_format_items, compiler.items_count(), d_new_offsets, d_chars}); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), durations.null_count(), std::move(null_mask), stream, mr); } // non-duration types throw an exception template <typename T, typename... Args> std::enable_if_t<not cudf::is_duration<T>(), std::unique_ptr<column>> operator()(Args&&...) const { CUDF_FAIL("Values for from_durations function must be a duration type."); } }; static const __device__ __constant__ int32_t powers_of_ten[10] = { 1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L}; // this parses duration string into a duration integer template <typename T> // duration type struct parse_duration { column_device_view const d_strings; format_item const* d_format_items; size_type items_count; // function to parse string (maximum 10 digits) to integer. __device__ int32_t str2int(const char* str, int8_t max_bytes, int8_t& actual_length) { const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; int32_t value = 0; for (int8_t idx = 0; idx < max_bytes; ++idx) { char chr = *ptr++; if (chr < '0' || chr > '9') { ptr--; // roll back break; } value = (value * 10) + static_cast<int32_t>(chr - '0'); } actual_length += (ptr - str); return (*str == '-') ? -value : value; } // function to parse fraction of decimal value with trailing zeros removed. __device__ int32_t str2int_fixed(const char* str, int8_t fixed_width, size_type string_length, int8_t& actual_length) { const char* ptr = (*str == '.') ? str + 1 : str; int32_t value = 0; // parse till fixed_width or end of string. 
for (int8_t idx = 0; idx < fixed_width && idx < string_length; ++idx) { char chr = *ptr++; if (chr < '0' || chr > '9') { ptr--; // roll back break; } value = (value * 10) + static_cast<int32_t>(chr - '0'); } auto parsed_length = ptr - str; // compensate for missing trailing zeros if (parsed_length < fixed_width) value *= powers_of_ten[fixed_width - parsed_length]; actual_length += parsed_length; return value; } // parse 2 digit string to integer __device__ int8_t parse_2digit_int(const char* str, int8_t& actual_length) { const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; int8_t value = 0; if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast<int32_t>(*ptr++ - '0'); if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast<int32_t>(*ptr++ - '0'); actual_length += (ptr - str); return (*str == '-') ? -value : value; } inline __device__ int8_t parse_hour(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } inline __device__ int8_t parse_minute(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } inline __device__ int8_t parse_second(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } // Walk the format_items to read the datetime string. // Returns 0 if all ok. 
__device__ int parse_into_parts(string_view const& d_string, duration_component* timeparts) { auto ptr = d_string.data(); auto length = d_string.size_bytes(); int8_t hour_shift{0}; for (size_t idx = 0; idx < items_count; ++idx) { auto item = d_format_items[idx]; if (length < item.length) return 1; if (item.item_type == format_char_type::literal) { // static character we'll just skip; // consume item.length bytes from string ptr += item.length; length -= item.length; continue; } timeparts->is_negative |= (*ptr == '-'); // special logic for each specifier int8_t item_length{0}; switch (item.value) { case 'D': // day timeparts->day = str2int(ptr, 11, item_length); break; case '-': // skip item_length = (*ptr == '-'); break; case 'H': // 24-hour timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; break; case 'I': // 12-hour timeparts->hour = parse_hour(ptr, item_length); break; case 'M': // minute timeparts->minute = parse_minute(ptr, item_length); break; case 'S': // [-]SS[.mmm][uuu][nnn] timeparts->second = parse_second(ptr, item_length); if (*(ptr + item_length) == '.') { item_length++; int64_t nanoseconds = str2int_fixed( ptr + item_length, 9, length - item_length, item_length); // normalize to nanoseconds timeparts->subsecond = nanoseconds; } break; case 'p': // AM/PM if (*ptr == 'P' && *(ptr + 1) == 'M') hour_shift = 12; else hour_shift = 0; item_length = 2; break; case 'R': // [-]HH:SS timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; item_length++; // : timeparts->minute = parse_minute(ptr + item_length, item_length); break; case 'T': // [-]HH:MM:SS timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; item_length++; // : timeparts->minute = parse_minute(ptr + item_length, item_length); item_length++; // : timeparts->second = parse_second(ptr + item_length, item_length); break; case 'r': // hh:MM:SS AM/PM timeparts->hour = parse_hour(ptr, item_length); item_length++; // : timeparts->minute = parse_minute(ptr + item_length, 
item_length); item_length++; // : timeparts->second = parse_second(ptr + item_length, item_length); item_length++; // space if (*(ptr + item_length) == 'P' && *(ptr + item_length + 1) == 'M') hour_shift = 12; else hour_shift = 0; item_length += 2; break; default: return 3; } ptr += item_length; length -= item_length; } // negate all if duration has negative sign if (timeparts->is_negative) { auto negate = [](auto i) { return (i < 0 ? i : -i); }; timeparts->day = negate(timeparts->day); timeparts->hour = negate(timeparts->hour); timeparts->minute = negate(timeparts->minute); timeparts->second = negate(timeparts->second); timeparts->subsecond = negate(timeparts->subsecond); hour_shift = -hour_shift; } timeparts->hour += hour_shift; return 0; } inline __device__ int64_t duration_from_parts(duration_component const* timeparts) { int32_t days = timeparts->day; auto hour = timeparts->hour; auto minute = timeparts->minute; auto second = timeparts->second; auto duration = duration_D(days) + cuda::std::chrono::hours(hour) + cuda::std::chrono::minutes(minute) + duration_s(second); if (cuda::std::is_same<T, duration_D>::value) return cuda::std::chrono::duration_cast<duration_D>(duration).count(); else if (cuda::std::is_same<T, duration_s>::value) return cuda::std::chrono::duration_cast<duration_s>(duration).count(); duration_ns subsecond(timeparts->subsecond); // ns if (cuda::std::is_same<T, duration_ms>::value) { return cuda::std::chrono::duration_cast<duration_ms>(duration + subsecond).count(); } else if (cuda::std::is_same<T, duration_us>::value) { return cuda::std::chrono::duration_cast<duration_us>(duration + subsecond).count(); } else if (cuda::std::is_same<T, duration_ns>::value) return cuda::std::chrono::duration_cast<duration_ns>(duration + subsecond).count(); return cuda::std::chrono::duration_cast<duration_ns>(duration + subsecond).count(); } __device__ T operator()(size_type idx) { if (d_strings.is_null(idx)) return T{0}; string_view d_str = 
d_strings.element<string_view>(idx); if (d_str.empty()) return T{0}; // duration_component timeparts = {0}; if (parse_into_parts(d_str, &timeparts)) return T{0}; // unexpected parse case // return static_cast<T>(duration_from_parts(&timeparts)); } }; /** * @brief This dispatch method is for converting strings to durations. * * The template function declaration ensures only duration types are used. */ struct dispatch_to_durations_fn { template <typename T, std::enable_if_t<cudf::is_duration<T>()>* = nullptr> void operator()(column_device_view const& d_strings, std::string const& format, mutable_column_view& results_view, rmm::cuda_stream_view stream) const { format_compiler compiler(format.c_str(), stream); auto d_items = compiler.compiled_format_items(); auto d_results = results_view.data<T>(); parse_duration<T> pfn{d_strings, d_items, compiler.items_count()}; thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(results_view.size()), d_results, pfn); } template <typename T, std::enable_if_t<not cudf::is_duration<T>()>* = nullptr> void operator()(column_device_view const&, std::string const&, mutable_column_view&, rmm::cuda_stream_view) const { CUDF_FAIL("Only durations type are expected for to_durations function"); } }; } // namespace std::unique_ptr<column> from_durations(column_view const& durations, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = durations.size(); if (strings_count == 0) return make_empty_column(data_type{type_id::STRING}); return type_dispatcher( durations.type(), dispatch_from_durations_fn{}, durations, format, stream, mr); } std::unique_ptr<column> to_durations(strings_column_view const& strings, data_type duration_type, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = strings.size(); if (strings_count == 0) return 
make_duration_column(duration_type, 0); CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_column = *strings_column; auto results = make_duration_column(duration_type, strings_count, cudf::detail::copy_bitmask(strings.parent(), stream, mr), strings.null_count(), stream, mr); auto results_view = results->mutable_view(); cudf::type_dispatcher( duration_type, dispatch_to_durations_fn(), d_column, format, results_view, stream); results->set_null_count(strings.null_count()); return results; } } // namespace detail std::unique_ptr<column> from_durations(column_view const& durations, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_durations(durations, format, rmm::cuda_stream_default, mr); } std::unique_ptr<column> to_durations(strings_column_view const& strings, data_type duration_type, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::to_durations(strings, duration_type, format, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
a57fa21d53407174886567e46617b865fdf6e9cb.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/get_value.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/types.hpp> #include <strings/convert/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <thrust/transform_reduce.h> #include <map> #include <vector> namespace cudf { namespace strings { namespace detail { namespace { // duration components timeparts structure struct alignas(4) duration_component { int32_t day; //-2,147,483,648 to 2,147,483,647 int32_t subsecond; // 000000000 to 999999999 int8_t hour; // 00 to 23 int8_t minute; // 00 to 59 int8_t second; // 00 to 59 bool is_negative; // true/false }; enum class format_char_type : int8_t { literal, // literal char type passed through specifier // duration format specifier }; /** * @brief Represents a format specifier or literal from a duration format string. * * Created by the format_compiler when parsing a format string. 
*/ struct alignas(4) format_item { format_char_type item_type; // specifier or literal indicator char value; // specifier or literal value int8_t length; // item length in bytes static format_item new_specifier(char format_char, int8_t length) { return format_item{format_char_type::specifier, format_char, length}; } static format_item new_delimiter(char literal) { return format_item{format_char_type::literal, literal, 1}; } }; /** * @brief The format_compiler parses a duration format string into a vector of * format_items. * * The vector of format_items are used when parsing a string into duration * components and when formatting a string from duration components. */ struct format_compiler { std::string format; rmm::device_uvector<format_item> d_items; format_compiler(const char* format_, rmm::cuda_stream_view stream) : format(format_), d_items(0, stream) { static std::map<char, int8_t> const specifier_lengths = { {'-', -1}, // '-' if negative {'D', -1}, // 1 to 11 (not in std::format) {'H', 2}, // HH {'I', 2}, // HH {'M', 2}, // MM {'S', -1}, // 2 to 13 SS[.mmm][uuu][nnn] (uuu,nnn are not in std::format) {'p', 2}, // AM/PM {'R', 5}, // 5 HH:MM {'T', 8}, // 8 HH:MM:SS" {'r', 11} // HH:MM:SS AM/PM }; std::vector<format_item> items; const char* str = format.c_str(); auto length = format.length(); bool negative_sign{true}; while (length > 0) { char ch = *str++; length--; if (ch != '%') { items.push_back(format_item::new_delimiter(ch)); continue; } CUDF_EXPECTS(length > 0, "Unfinished specifier in duration format"); ch = *str++; length--; if (ch == '%') // escaped % char { items.push_back(format_item::new_delimiter(ch)); continue; } else if (ch == 'n') { items.push_back(format_item::new_delimiter('\n')); continue; } else if (ch == 't') { items.push_back(format_item::new_delimiter('\t')); continue; } if (ch == 'O') { CUDF_EXPECTS(*str == 'H' || *str == 'I' || *str == 'M' || *str == 'S', "locale's alternative representation not supported for specifier: " + std::string(1, 
*str)); ch = *str++; length--; items.push_back(format_item::new_specifier(ch, 2)); // without sign continue; } CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(), "invalid format specifier: " + std::string(1, ch)); // negative sign should be present only once. if (negative_sign) { if (std::string("DHIMSRT").find_first_of(ch) != std::string::npos) { items.push_back(format_item::new_specifier('-', specifier_lengths.at('-'))); negative_sign = false; } } int8_t spec_length = specifier_lengths.at(ch); items.push_back(format_item::new_specifier(ch, spec_length)); } // create program in device memory d_items.resize(items.size(), stream); CUDA_TRY(cudaMemcpyAsync(d_items.data(), items.data(), items.size() * sizeof(items[0]), cudaMemcpyHostToDevice, stream.value())); } format_item const* compiled_format_items() { return d_items.data(); } size_type items_count() const { return static_cast<size_type>(d_items.size()); } }; template <typename T> __device__ void dissect_duration(T duration, duration_component* timeparts) { timeparts->is_negative = (duration < T{0}); timeparts->day = cuda::std::chrono::duration_cast<duration_D>(duration).count(); if (cuda::std::is_same<T, duration_D>::value) return; duration_s seconds = cuda::std::chrono::duration_cast<duration_s>(duration); timeparts->hour = (cuda::std::chrono::duration_cast<cuda::std::chrono::hours>(seconds) % duration_D(1)).count(); timeparts->minute = (cuda::std::chrono::duration_cast<cuda::std::chrono::minutes>(seconds) % cuda::std::chrono::hours(1)) .count(); timeparts->second = (seconds % cuda::std::chrono::minutes(1)).count(); if (not cuda::std::is_same<T, duration_s>::value) { timeparts->subsecond = (duration % duration_s(1)).count(); } } template <typename T> struct duration_to_string_size_fn { const column_device_view d_durations; const format_item* d_format_items; size_type items_count; __device__ int8_t format_length(char format_char, duration_component const* const timeparts) const { switch 
(format_char) { case '-': return timeparts->is_negative; break; case 'D': return count_digits(timeparts->day) - (timeparts->day < 0); break; case 'S': return 2 + (timeparts->subsecond == 0 ? 0 : [] { if (cuda::std::is_same<T, duration_ms>::value) return 3 + 1; // +1 is for dot if (cuda::std::is_same<T, duration_us>::value) return 6 + 1; // +1 is for dot if (cuda::std::is_same<T, duration_ns>::value) return 9 + 1; // +1 is for dot return 0; }()); break; default: return 2; } } __device__ size_type operator()(size_type idx) { if (d_durations.is_null(idx)) return 0; auto duration = d_durations.element<T>(idx); duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) dissect_duration(duration, &timeparts); return thrust::transform_reduce( thrust::seq, d_format_items, d_format_items + items_count, [this, &timeparts] __device__(format_item item) -> size_type { if (item.item_type == format_char_type::literal) return 1; else if (item.length != -1) return item.length; else return format_length(item.value, &timeparts); }, size_type{0}, thrust::plus<size_type>()); } }; template <typename T> struct duration_to_string_fn : public duration_to_string_size_fn<T> { const int32_t* d_offsets; char* d_chars; using duration_to_string_size_fn<T>::d_durations; using duration_to_string_size_fn<T>::d_format_items; using duration_to_string_size_fn<T>::items_count; duration_to_string_fn(const column_device_view d_durations, const format_item* d_format_items, size_type items_count, const int32_t* d_offsets, char* d_chars) : duration_to_string_size_fn<T>{d_durations, d_format_items, items_count}, d_offsets(d_offsets), d_chars(d_chars) { } // utility to create (optionally) 0-padded integers (up to 10 chars) without negative sign. // min_digits==-1 indicates no 0-padding. 
__device__ char* int2str(char* str, int min_digits, int32_t value) { constexpr int MAX_DIGITS = 10; // largest 32-bit integer is 10 digits assert(min_digits <= MAX_DIGITS); if (value == 0) { do { *str++ = '0'; } while (--min_digits > 0); return str; } char digits[MAX_DIGITS] = {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0'}; int digits_idx = 0; while (value != 0) { assert(digits_idx < MAX_DIGITS); digits[digits_idx++] = '0' + std::abs(value % 10); // next digit value = value / 10; } digits_idx = std::max(digits_idx, min_digits); // digits are backwards, reverse the string into the output while (digits_idx-- > 0) *str++ = digits[digits_idx]; return str; } __device__ char* int_to_2digitstr(char* str, int8_t value) { assert(value >= -99 && value <= 99); value = std::abs(value); str[0] = '0' + value / 10; str[1] = '0' + value % 10; return str + 2; } inline __device__ char* day(char* ptr, duration_component const* timeparts) { return int2str(ptr, -1, timeparts->day); } inline __device__ char* hour_12(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->hour % 12); } inline __device__ char* hour_24(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->hour); } inline __device__ char* am_or_pm(char* ptr, duration_component const* timeparts) { *ptr++ = (timeparts->hour / 12 == 0 ? 
'A' : 'P'); *ptr++ = 'M'; return ptr; } inline __device__ char* minute(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->minute); } inline __device__ char* second(char* ptr, duration_component const* timeparts) { return int_to_2digitstr(ptr, timeparts->second); } inline __device__ char* subsecond(char* ptr, duration_component const* timeparts) { if (timeparts->subsecond == 0) return ptr; const int digits = duration_to_string_size_fn<T>::format_length('S', timeparts) - 3; *ptr = '.'; auto value = timeparts->subsecond; for (int idx = digits; idx > 0; idx--) { *(ptr + idx) = '0' + std::abs(value % 10); value /= 10; } return ptr + digits + 1; } __device__ char* format_from_parts(duration_component const* timeparts, char* ptr) { for (size_t idx = 0; idx < items_count; ++idx) { auto item = d_format_items[idx]; if (item.item_type == format_char_type::literal) { *ptr++ = item.value; continue; } // special logic for each specifier switch (item.value) { case 'D': // days ptr = day(ptr, timeparts); break; case '-': // - if value is negative if (timeparts->is_negative) *ptr++ = '-'; break; case 'H': // 24-hour ptr = hour_24(ptr, timeparts); break; case 'I': // 12-hour ptr = hour_12(ptr, timeparts); break; case 'M': // minute ptr = minute(ptr, timeparts); break; case 'S': // second ptr = second(ptr, timeparts); if (item.length == 2) break; case 'f': // sub-second ptr = subsecond(ptr, timeparts); break; case 'p': ptr = am_or_pm(ptr, timeparts); break; case 'R': // HH:MM 24-hour ptr = hour_24(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); break; case 'T': // HH:MM:SS 24-hour ptr = hour_24(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); *ptr++ = ':'; ptr = second(ptr, timeparts); break; case 'r': // HH:MM:SS AM/PM 12-hour ptr = hour_12(ptr, timeparts); *ptr++ = ':'; ptr = minute(ptr, timeparts); *ptr++ = ':'; ptr = second(ptr, timeparts); *ptr++ = ' '; ptr = am_or_pm(ptr, timeparts); break; default: // ignore 
everything else break; } } return ptr; } __device__ void operator()(size_type idx) { if (d_durations.is_null(idx)) return; auto duration = d_durations.template element<T>(idx); duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) dissect_duration(duration, &timeparts); // convert to characters format_from_parts(&timeparts, d_chars + d_offsets[idx]); } }; /** * @brief This dispatch method is for converting durations into strings. * * The template function declaration ensures only duration types are used. */ struct dispatch_from_durations_fn { template <typename T, std::enable_if_t<cudf::is_duration<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& durations, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); format_compiler compiler(format.c_str(), stream); auto d_format_items = compiler.compiled_format_items(); size_type strings_count = durations.size(); auto column = column_device_view::create(durations, stream); auto d_column = *column; // copy null mask rmm::device_buffer null_mask = cudf::detail::copy_bitmask(durations, stream, mr); // build offsets column auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0), duration_to_string_size_fn<T>{d_column, d_format_items, compiler.items_count()}); auto offsets_column = detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr); auto offsets_view = offsets_column->view(); auto d_new_offsets = offsets_view.template data<int32_t>(); // build chars column auto const chars_bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), strings_count, stream); auto chars_column = detail::create_chars_child_column(strings_count, chars_bytes, stream, mr); auto d_chars = chars_column->mutable_view().template data<char>(); 
thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), strings_count, duration_to_string_fn<T>{ d_column, d_format_items, compiler.items_count(), d_new_offsets, d_chars}); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), durations.null_count(), std::move(null_mask), stream, mr); } // non-duration types throw an exception template <typename T, typename... Args> std::enable_if_t<not cudf::is_duration<T>(), std::unique_ptr<column>> operator()(Args&&...) const { CUDF_FAIL("Values for from_durations function must be a duration type."); } }; static const __device__ __constant__ int32_t powers_of_ten[10] = { 1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L}; // this parses duration string into a duration integer template <typename T> // duration type struct parse_duration { column_device_view const d_strings; format_item const* d_format_items; size_type items_count; // function to parse string (maximum 10 digits) to integer. __device__ int32_t str2int(const char* str, int8_t max_bytes, int8_t& actual_length) { const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; int32_t value = 0; for (int8_t idx = 0; idx < max_bytes; ++idx) { char chr = *ptr++; if (chr < '0' || chr > '9') { ptr--; // roll back break; } value = (value * 10) + static_cast<int32_t>(chr - '0'); } actual_length += (ptr - str); return (*str == '-') ? -value : value; } // function to parse fraction of decimal value with trailing zeros removed. __device__ int32_t str2int_fixed(const char* str, int8_t fixed_width, size_type string_length, int8_t& actual_length) { const char* ptr = (*str == '.') ? str + 1 : str; int32_t value = 0; // parse till fixed_width or end of string. 
for (int8_t idx = 0; idx < fixed_width && idx < string_length; ++idx) { char chr = *ptr++; if (chr < '0' || chr > '9') { ptr--; // roll back break; } value = (value * 10) + static_cast<int32_t>(chr - '0'); } auto parsed_length = ptr - str; // compensate for missing trailing zeros if (parsed_length < fixed_width) value *= powers_of_ten[fixed_width - parsed_length]; actual_length += parsed_length; return value; } // parse 2 digit string to integer __device__ int8_t parse_2digit_int(const char* str, int8_t& actual_length) { const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; int8_t value = 0; if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast<int32_t>(*ptr++ - '0'); if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast<int32_t>(*ptr++ - '0'); actual_length += (ptr - str); return (*str == '-') ? -value : value; } inline __device__ int8_t parse_hour(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } inline __device__ int8_t parse_minute(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } inline __device__ int8_t parse_second(const char* str, int8_t& actual_length) { return parse_2digit_int(str, actual_length); } // Walk the format_items to read the datetime string. // Returns 0 if all ok. 
__device__ int parse_into_parts(string_view const& d_string, duration_component* timeparts) { auto ptr = d_string.data(); auto length = d_string.size_bytes(); int8_t hour_shift{0}; for (size_t idx = 0; idx < items_count; ++idx) { auto item = d_format_items[idx]; if (length < item.length) return 1; if (item.item_type == format_char_type::literal) { // static character we'll just skip; // consume item.length bytes from string ptr += item.length; length -= item.length; continue; } timeparts->is_negative |= (*ptr == '-'); // special logic for each specifier int8_t item_length{0}; switch (item.value) { case 'D': // day timeparts->day = str2int(ptr, 11, item_length); break; case '-': // skip item_length = (*ptr == '-'); break; case 'H': // 24-hour timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; break; case 'I': // 12-hour timeparts->hour = parse_hour(ptr, item_length); break; case 'M': // minute timeparts->minute = parse_minute(ptr, item_length); break; case 'S': // [-]SS[.mmm][uuu][nnn] timeparts->second = parse_second(ptr, item_length); if (*(ptr + item_length) == '.') { item_length++; int64_t nanoseconds = str2int_fixed( ptr + item_length, 9, length - item_length, item_length); // normalize to nanoseconds timeparts->subsecond = nanoseconds; } break; case 'p': // AM/PM if (*ptr == 'P' && *(ptr + 1) == 'M') hour_shift = 12; else hour_shift = 0; item_length = 2; break; case 'R': // [-]HH:SS timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; item_length++; // : timeparts->minute = parse_minute(ptr + item_length, item_length); break; case 'T': // [-]HH:MM:SS timeparts->hour = parse_hour(ptr, item_length); hour_shift = 0; item_length++; // : timeparts->minute = parse_minute(ptr + item_length, item_length); item_length++; // : timeparts->second = parse_second(ptr + item_length, item_length); break; case 'r': // hh:MM:SS AM/PM timeparts->hour = parse_hour(ptr, item_length); item_length++; // : timeparts->minute = parse_minute(ptr + item_length, 
item_length); item_length++; // : timeparts->second = parse_second(ptr + item_length, item_length); item_length++; // space if (*(ptr + item_length) == 'P' && *(ptr + item_length + 1) == 'M') hour_shift = 12; else hour_shift = 0; item_length += 2; break; default: return 3; } ptr += item_length; length -= item_length; } // negate all if duration has negative sign if (timeparts->is_negative) { auto negate = [](auto i) { return (i < 0 ? i : -i); }; timeparts->day = negate(timeparts->day); timeparts->hour = negate(timeparts->hour); timeparts->minute = negate(timeparts->minute); timeparts->second = negate(timeparts->second); timeparts->subsecond = negate(timeparts->subsecond); hour_shift = -hour_shift; } timeparts->hour += hour_shift; return 0; } inline __device__ int64_t duration_from_parts(duration_component const* timeparts) { int32_t days = timeparts->day; auto hour = timeparts->hour; auto minute = timeparts->minute; auto second = timeparts->second; auto duration = duration_D(days) + cuda::std::chrono::hours(hour) + cuda::std::chrono::minutes(minute) + duration_s(second); if (cuda::std::is_same<T, duration_D>::value) return cuda::std::chrono::duration_cast<duration_D>(duration).count(); else if (cuda::std::is_same<T, duration_s>::value) return cuda::std::chrono::duration_cast<duration_s>(duration).count(); duration_ns subsecond(timeparts->subsecond); // ns if (cuda::std::is_same<T, duration_ms>::value) { return cuda::std::chrono::duration_cast<duration_ms>(duration + subsecond).count(); } else if (cuda::std::is_same<T, duration_us>::value) { return cuda::std::chrono::duration_cast<duration_us>(duration + subsecond).count(); } else if (cuda::std::is_same<T, duration_ns>::value) return cuda::std::chrono::duration_cast<duration_ns>(duration + subsecond).count(); return cuda::std::chrono::duration_cast<duration_ns>(duration + subsecond).count(); } __device__ T operator()(size_type idx) { if (d_strings.is_null(idx)) return T{0}; string_view d_str = 
d_strings.element<string_view>(idx); if (d_str.empty()) return T{0}; // duration_component timeparts = {0}; if (parse_into_parts(d_str, &timeparts)) return T{0}; // unexpected parse case // return static_cast<T>(duration_from_parts(&timeparts)); } }; /** * @brief This dispatch method is for converting strings to durations. * * The template function declaration ensures only duration types are used. */ struct dispatch_to_durations_fn { template <typename T, std::enable_if_t<cudf::is_duration<T>()>* = nullptr> void operator()(column_device_view const& d_strings, std::string const& format, mutable_column_view& results_view, rmm::cuda_stream_view stream) const { format_compiler compiler(format.c_str(), stream); auto d_items = compiler.compiled_format_items(); auto d_results = results_view.data<T>(); parse_duration<T> pfn{d_strings, d_items, compiler.items_count()}; thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(results_view.size()), d_results, pfn); } template <typename T, std::enable_if_t<not cudf::is_duration<T>()>* = nullptr> void operator()(column_device_view const&, std::string const&, mutable_column_view&, rmm::cuda_stream_view) const { CUDF_FAIL("Only durations type are expected for to_durations function"); } }; } // namespace std::unique_ptr<column> from_durations(column_view const& durations, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = durations.size(); if (strings_count == 0) return make_empty_column(data_type{type_id::STRING}); return type_dispatcher( durations.type(), dispatch_from_durations_fn{}, durations, format, stream, mr); } std::unique_ptr<column> to_durations(strings_column_view const& strings, data_type duration_type, std::string const& format, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { size_type strings_count = strings.size(); if (strings_count == 0) return 
make_duration_column(duration_type, 0); CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); auto strings_column = column_device_view::create(strings.parent(), stream); auto d_column = *strings_column; auto results = make_duration_column(duration_type, strings_count, cudf::detail::copy_bitmask(strings.parent(), stream, mr), strings.null_count(), stream, mr); auto results_view = results->mutable_view(); cudf::type_dispatcher( duration_type, dispatch_to_durations_fn(), d_column, format, results_view, stream); results->set_null_count(strings.null_count()); return results; } } // namespace detail std::unique_ptr<column> from_durations(column_view const& durations, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_durations(durations, format, rmm::cuda_stream_default, mr); } std::unique_ptr<column> to_durations(strings_column_view const& strings, data_type duration_type, std::string const& format, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::to_durations(strings, duration_type, format, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
6df37f7295a83fb83733a8897b7bf43a09344b14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Wei Hu // ============================================================================= // // Class for performing time integration in fluid system. // ============================================================================= #include "chrono_fsi/physics/ChFluidDynamics.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" using std::cout; using std::endl; namespace chrono { namespace fsi { // ----------------------------------------------------------------------------- // Device function to calculate the share of density influence on a given // particle from all other particle in a given cell __device__ void collideCellDensityReInit(Real& numerator, Real& denominator, int3 gridPos, uint index, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real4 rhoPreMuB = sortedRhoPreMu[j]; Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML) continue; numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w); denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, 
sortedPosRad[j].w); } } } // ----------------------------------------------------------------------------- // Kernel to apply periodic BC along x __global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) rhoPresMuD[index].y += paramsD.deltaPress.x; return; } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) rhoPresMuD[index].y -= paramsD.deltaPress.x; return; } } // ----------------------------------------------------------------------------- // Kernel to apply inlet/outlet BC along x __global__ void ApplyInletBoundaryXKernel(Real4* posRadD, Real3* VelMassD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; Real4 rhoPresMu = rhoPresMuD[index]; if (rhoPresMu.w > 0.0) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w <= 0.0) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); if (rhoPresMu.w <= -.1) { rhoPresMu.y = rhoPresMu.y 
- paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x > -paramsD.x_in) rhoPresMuD[index].y = 0; if (posRad.x < paramsD.x_in) VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); } // ----------------------------------------------------------------------------- // Kernel to apply periodic BC along y __global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.y > paramsD.cMax.y) { posRad.y -= (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.y < paramsD.cMin.y) { posRad.y += (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- // Kernel to apply periodic BC along z __global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.z > paramsD.cMax.z) { posRad.z -= 
(paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.z < paramsD.cMin.z) { posRad.z += (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- // Kernel to keep particle inside the simulation domain __global__ void ApplyOutOfBoundaryKernel(Real4* posRadD, Real4* rhoPresMuD, Real3* velMasD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real3 vel = mR3(velMasD[index]); Real h = posRadD[index].w; if (posRad.x > 0.5 * paramsD.boxDimX) posRad.x = 0.5 * paramsD.boxDimX; if (posRad.x < -0.5 * paramsD.boxDimX) posRad.x = -0.5 * paramsD.boxDimX; if (posRad.y > 0.5 * paramsD.boxDimY) posRad.y = 0.5 * paramsD.boxDimY; if (posRad.y < -0.5 * paramsD.boxDimY) posRad.y = -0.5 * paramsD.boxDimY; if (posRad.z > 1.0 * paramsD.boxDimZ) posRad.z = 1.0 * paramsD.boxDimZ; if (posRad.z < -0.0 * paramsD.boxDimZ) posRad.z = -0.0 * paramsD.boxDimZ; posRadD[index] = mR4(posRad, h); velMasD[index] = mR3(vel); return; } // ----------------------------------------------------------------------------- // Kernel to update the fluid properities. It updates the stress tensor, // density, velocity and position relying on explicit Euler scheme. // Pressure is obtained from the density and an Equation of State. 
__global__ void UpdateFluidD(Real4* posRadD, Real3* velMasD, Real4* rhoPresMuD, Real3* tauXxYyZzD, Real3* tauXyXzYzD, Real3* vel_XSPH_D, Real4* derivVelRhoD, Real3* derivTauXxYyZzD, Real3* derivTauXyXzYzD, Real4* sr_tau_I_mu_iD, uint* activityIdentifierD, uint* freeSurfaceIdD, int2 updatePortion, Real dT, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; if (index >= updatePortion.y) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; Real4 derivVelRho = derivVelRhoD[index]; Real4 rhoPresMu = rhoPresMuD[index]; Real h = posRadD[index].w; Real p_tr, p_n; if (rhoPresMu.w < 0) { // This is only implemented for granular material if (paramsD.elastic_SPH) { //-------------------------------- // ** total stress tau //-------------------------------- Real3 tauXxYyZz = tauXxYyZzD[index]; Real3 tauXyXzYz = tauXyXzYzD[index]; Real3 derivTauXxYyZz = derivTauXxYyZzD[index]; Real3 derivTauXyXzYz = derivTauXyXzYzD[index]; Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT; Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT; // check if there is a plastic flow p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z); tauXxYyZz.x += p_n; tauXxYyZz.y += p_n; tauXxYyZz.z += p_n; p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z); updatedTauXxYyZz.x += p_tr; updatedTauXxYyZz.y += p_tr; updatedTauXxYyZz.z += p_tr; Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) + 2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); tau_n = sqrt(0.5 * tau_n); Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT; // should use the positive magnitude according to "A // 
constitutive law for dense granular flows" Nature 2006 Real mu_s = paramsD.mu_fric_s; Real mu_2 = paramsD.mu_fric_2; // Real s_0 = mu_s * p_tr; // Real s_2 = mu_2 * p_tr; // Real xi = 1.1; Real dia = paramsD.ave_diam; Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);// Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) ); Real coh = paramsD.Coh_coeff; // Real Chi_cri = 0.1; // if (Chi < Chi_cri){ // coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0; // // coh = paramsD.Coh_coeff * (1.0 - I / I_cri); // } else { // coh = 0.0; // } Real inv_mus = 1.0 / paramsD.mu_fric_s; Real p_cri = - coh * inv_mus; if (p_tr > p_cri) { Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9); // Real G0 = paramsD.G_shear; // Real alpha = xi*G0*I0*(dT)*sqrt(p_tr); // Real B0 = s_2 + tau_tr + alpha; // Real H0 = s_2*tau_tr + s_0*alpha; // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9); // if(tau_tr>s_0){ // Real coeff = tau_n1/(tau_tr+1e-9); // updatedTauXxYyZz = updatedTauXxYyZz*coeff; // updatedTauXyXzYz = updatedTauXyXzYz*coeff; // } Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA; // should use tau_max instead of s_0 according to // "A constitutive law for dense granular flows" Nature 2006 if (tau_tr > tau_max) { Real coeff = tau_max / (tau_tr + 1e-9); updatedTauXxYyZz = updatedTauXxYyZz * coeff; updatedTauXyXzYz = updatedTauXyXzYz * coeff; } } // Set stress to zero if the pressure is smaller than the threshold if (p_tr < p_cri) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; // Real coeff = abs(p_cri / (p_tr + 1e-9)); // if (p_tr < 2.0 * p_cri){ // coeff = 0.0; // } else { // coeff = abs(1.0 - (p_tr - p_cri) / p_cri); // } // updatedTauXxYyZz = updatedTauXxYyZz * coeff; // updatedTauXyXzYz = updatedTauXyXzYz * coeff; // p_tr = p_cri * coeff; } // Set stress to zero if the particle is close to free surface if (freeSurfaceIdD[index] == 1) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 
0.0; } if (paramsD.output_length == 2) { Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) + square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z)); tau_tr = sqrt(0.5 * tau_tr); sr_tau_I_mu_iD[index].y = tau_tr; } tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr); tauXyXzYzD[index] = updatedTauXyXzYz; } //------------- // ** position //------------- Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH * Real3 posRad = mR3(posRadD[index]); Real3 updatedPositon = posRad + vel_XSPH * dT; if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) { printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } posRadD[index] = mR4(updatedPositon, h); //------------- // ** velocity //------------- // Note that the velocity update should not use the XSPH contribution // It adds dissipation to the solution, and provides numerical damping Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index] Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT; velMasD[index] = updatedVelocity; //------------- // ** density //------------- if (paramsD.elastic_SPH) { // This is only implemented for granular material rhoPresMu.y = p_tr; rhoPresMu.x = paramsD.rho0; } else { Real rho2 = rhoPresMu.x + derivVelRho.w * dT; rhoPresMu.y = Eos(rho2, rhoPresMu.w); rhoPresMu.x = rho2; } if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) { printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } rhoPresMuD[index] = rhoPresMu; } // Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time // derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces. 
// calculate the force that is f=m dv/dt // derivVelRhoD[index] *= paramsD.markerMass; } //------------------------------------------------------------------------------ __global__ void Update_Fluid_State(Real3* new_vel, Real4* posRad, Real3* velMas, Real4* rhoPreMu, int4 updatePortion, double dT, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= updatePortion.y) return; velMas[i_idx] = new_vel[i_idx]; Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx]; Real h = posRad[i_idx].w; posRad[i_idx] = mR4(newpos, h); if (!(isfinite(posRad[i_idx].x) && isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) { printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n", i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w); } if (!(isfinite(rhoPreMu[i_idx].x) && isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) { printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n", i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w); } if (!(isfinite(velMas[i_idx].x) && isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) { printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n", i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z); } } // ----------------------------------------------------------------------------- // Kernel for updating the density. // It calculates the density of the particle. It does include the normalization // close to the boundaries and free surface. 
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; // read particle data from sorted arrays Real3 posRadA = mR3(sortedPosRad[index]); Real4 rhoPreMuA = sortedRhoPreMu[index]; // get address in grid int3 gridPos = calcGridPos(posRadA); Real numerator = 0.0; Real denominator = 0.0; // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); collideCellDensityReInit(numerator, denominator, neighbourPos, index, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } rhoPreMuA.x = numerator; // denominator; // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w); dummySortedRhoPreMu[index] = rhoPreMuA; } // ----------------------------------------------------------------------------- // Kernel for updating the activity of all particles. 
__global__ void UpdateActivityD(Real4* posRadD, Real3* velMasD, Real3* posRigidBodiesD, Real3* pos_fsi_fea_D, uint* activityIdentifierD, uint* extendedActivityIdD, int2 updatePortion, Real Time, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; if (index >= updatePortion.y) return; // Set the particle as an active particle activityIdentifierD[index] = 1; extendedActivityIdD[index] = 1; // If during the settling phase, all particles are active if (Time < paramsD.settlingTime) return; size_t numRigidBodies = numObjectsD.numRigidBodies; size_t numFlexNodes = numObjectsD.numFlexNodes; size_t numTotal = numRigidBodies + numFlexNodes; // Check the activity of this particle uint isNotActive = 0; uint isNotExtended = 0; Real3 Acdomain = paramsD.bodyActiveDomain; Real3 ExAcdomain = paramsD.bodyActiveDomain + mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML); Real3 posRadA = mR3(posRadD[index]); for (uint num = 0; num < numRigidBodies; num++) { Real3 detPos = posRadA - posRigidBodiesD[num]; if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y || abs(detPos.z) > Acdomain.z) isNotActive = isNotActive + 1; if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y || abs(detPos.z) > ExAcdomain.z) isNotExtended = isNotExtended + 1; } for (uint num = 0; num < numFlexNodes; num++) { Real3 detPos = posRadA - pos_fsi_fea_D[num]; if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y || abs(detPos.z) > Acdomain.z) isNotActive = isNotActive + 1; if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y || abs(detPos.z) > ExAcdomain.z) isNotExtended = isNotExtended + 1; } // Set the particle as an inactive particle if needed if (isNotActive == numTotal && numTotal > 0) { activityIdentifierD[index] = 0; velMasD[index] = mR3(0.0); } if (isNotExtended == numTotal && numTotal > 0) extendedActivityIdD[index] = 0; return; } // ----------------------------------------------------------------------------- // CLASS 
FOR FLUID DYNAMICS SYSTEM // ----------------------------------------------------------------------------- ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker, ChSystemFsi_impl& otherFsiSystem, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, TimeIntegrator type, bool verb) : fsiSystem(otherFsiSystem), paramsH(otherParamsH), numObjectsH(otherNumObjects), integrator_type(type), verbose(verb) { switch (integrator_type) { case TimeIntegrator::I2SPH: forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created an I2SPH framework" << endl; } break; case TimeIntegrator::IISPH: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created an IISPH framework" << endl; } break; case TimeIntegrator::EXPLICITSPH: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created a WCSPH framework" << endl; } break; // Extend this function with your own linear solvers default: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl; } } // ----------------------------------------------------------------------------- ChFluidDynamics::~ChFluidDynamics() {} // ----------------------------------------------------------------------------- void ChFluidDynamics::Initialize() { forceSystem->Initialize(); 
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); } // ----------------------------------------------------------------------------- void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real dT, Real Time) { if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) { this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time); forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD); } else forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD); if (integrator_type == TimeIntegrator::IISPH) this->UpdateFluid_Implicit(sphMarkersD2); else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) this->UpdateFluid(sphMarkersD1, dT); this->ApplyBoundarySPH_Markers(sphMarkersD2); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real Time) { // Update portion of the SPH particles (should be all particles here) int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); //------------------------ uint numBlocks, numThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads); hipLaunchKernelGGL(( UpdateActivityD), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), 
mR3CAST(fsiMeshD->pos_fsi_fea_D), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD), U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD), updatePortion, Time, isErrorD); hipDeviceSynchronize(); cudaCheckError(); //------------------------ hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! program crashed in UpdateActivityD!\n"); hipFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) { // Update portion of the SPH particles (should be fluid particles only here) int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); //------------------------ uint numBlocks, numThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads); hipLaunchKernelGGL(( UpdateFluidD), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD), mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D), mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD), mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD), mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD), mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD), U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD), updatePortion, dT, isErrorD); hipDeviceSynchronize(); cudaCheckError(); //------------------------ hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! 
program crashed in UpdateFluidD!\n"); hipFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0; int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x, fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0); cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl; bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); hipMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Update_Fluid_State), dim3(numBlocks), dim3(numThreads), 0, 0, mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD); hipDeviceSynchronize(); cudaCheckError(); hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! 
program crashed in Update_Fluid_State!\n"); hipFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- // Apply periodic boundary conditions in x, y, and z directions void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); hipLaunchKernelGGL(( ApplyPeriodicBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); // ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>> // (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD)); // hipDeviceSynchronize(); // cudaCheckError(); } // ----------------------------------------------------------------------------- // Apply periodic boundary conditions in y, and z. // The inlet/outlet BC is applied in the x direction. // This functions needs to be tested. 
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); hipLaunchKernelGGL(( ApplyInletBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD)); hipDeviceSynchronize(); cudaCheckError(); // these are useful anyway for out of bound particles hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); hipDeviceSynchronize(); cudaCheckError(); } // ----------------------------------------------------------------------------- void ChFluidDynamics::DensityReinitialization() { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers); thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0)); hipLaunchKernelGGL(( ReCalcDensityD_F1), dim3(numBlocks), dim3(numThreads), 0, 0, mR4CAST(dummySortedRhoPreMu), mR4CAST(fsiSystem.sortedSphMarkersD->posRadD), mR3CAST(fsiSystem.sortedSphMarkersD->velMasD), mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD), U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD), U1CAST(fsiSystem.markersProximityD->cellStartD), U1CAST(fsiSystem.markersProximityD->cellEndD)); hipDeviceSynchronize(); cudaCheckError(); ChFsiForce::CopySortedToOriginal_NonInvasive_R4( fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu, 
fsiSystem.markersProximityD->gridMarkerIndexD); ChFsiForce::CopySortedToOriginal_NonInvasive_R4( fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu, fsiSystem.markersProximityD->gridMarkerIndexD); dummySortedRhoPreMu.clear(); } } // namespace fsi } // end namespace chrono
6df37f7295a83fb83733a8897b7bf43a09344b14.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha, Arman Pazouki, Wei Hu // ============================================================================= // // Class for performing time integration in fluid system. // ============================================================================= #include "chrono_fsi/physics/ChFluidDynamics.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" using std::cout; using std::endl; namespace chrono { namespace fsi { // ----------------------------------------------------------------------------- // Device function to calculate the share of density influence on a given // particle from all other particle in a given cell __device__ void collideCellDensityReInit(Real& numerator, Real& denominator, int3 gridPos, uint index, Real3 posRadA, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* cellStart, uint* cellEnd) { uint gridHash = calcGridHash(gridPos); uint startIndex = cellStart[gridHash]; if (startIndex != 0xffffffff) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j < endIndex; j++) { Real3 posRadB = mR3(sortedPosRad[j]); Real4 rhoPreMuB = sortedRhoPreMu[j]; Real3 dist3 = Distance(posRadA, posRadB); Real d = length(dist3); if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML) continue; numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w); denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w); } } } // 
----------------------------------------------------------------------------- // Kernel to apply periodic BC along x __global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) rhoPresMuD[index].y += paramsD.deltaPress.x; return; } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) rhoPresMuD[index].y -= paramsD.deltaPress.x; return; } } // ----------------------------------------------------------------------------- // Kernel to apply inlet/outlet BC along x __global__ void ApplyInletBoundaryXKernel(Real4* posRadD, Real3* VelMassD, Real4* rhoPresMuD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; Real4 rhoPresMu = rhoPresMuD[index]; if (rhoPresMu.w > 0.0) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.x > paramsD.cMax.x) { posRad.x -= (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w <= 0.0) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x; rhoPresMuD[index] = rhoPresMu; } } if (posRad.x < paramsD.cMin.x) { posRad.x += (paramsD.cMax.x - paramsD.cMin.x); posRadD[index] = mR4(posRad, h); VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); if (rhoPresMu.w <= -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x; 
rhoPresMuD[index] = rhoPresMu; } } if (posRad.x > -paramsD.x_in) rhoPresMuD[index].y = 0; if (posRad.x < paramsD.x_in) VelMassD[index] = mR3(paramsD.V_in.x, 0, 0); } // ----------------------------------------------------------------------------- // Kernel to apply periodic BC along y __global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.y > paramsD.cMax.y) { posRad.y -= (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.y < paramsD.cMin.y) { posRad.y += (paramsD.cMax.y - paramsD.cMin.y); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- // Kernel to apply periodic BC along z __global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD, Real4* rhoPresMuD, uint* activityIdentifierD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; // no need to do anything if it is not an active particle Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real h = posRadD[index].w; if (posRad.z > paramsD.cMax.z) { posRad.z -= (paramsD.cMax.z - 
paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } if (posRad.z < paramsD.cMin.z) { posRad.z += (paramsD.cMax.z - paramsD.cMin.z); posRadD[index] = mR4(posRad, h); if (rhoPresMu.w < -.1) { rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z; rhoPresMuD[index] = rhoPresMu; } return; } } // ----------------------------------------------------------------------------- // Kernel to keep particle inside the simulation domain __global__ void ApplyOutOfBoundaryKernel(Real4* posRadD, Real4* rhoPresMuD, Real3* velMasD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; Real4 rhoPresMu = rhoPresMuD[index]; if (fabs(rhoPresMu.w) < .1) return; // no need to do anything if it is a boundary particle Real3 posRad = mR3(posRadD[index]); Real3 vel = mR3(velMasD[index]); Real h = posRadD[index].w; if (posRad.x > 0.5 * paramsD.boxDimX) posRad.x = 0.5 * paramsD.boxDimX; if (posRad.x < -0.5 * paramsD.boxDimX) posRad.x = -0.5 * paramsD.boxDimX; if (posRad.y > 0.5 * paramsD.boxDimY) posRad.y = 0.5 * paramsD.boxDimY; if (posRad.y < -0.5 * paramsD.boxDimY) posRad.y = -0.5 * paramsD.boxDimY; if (posRad.z > 1.0 * paramsD.boxDimZ) posRad.z = 1.0 * paramsD.boxDimZ; if (posRad.z < -0.0 * paramsD.boxDimZ) posRad.z = -0.0 * paramsD.boxDimZ; posRadD[index] = mR4(posRad, h); velMasD[index] = mR3(vel); return; } // ----------------------------------------------------------------------------- // Kernel to update the fluid properities. It updates the stress tensor, // density, velocity and position relying on explicit Euler scheme. // Pressure is obtained from the density and an Equation of State. 
__global__ void UpdateFluidD(Real4* posRadD, Real3* velMasD, Real4* rhoPresMuD, Real3* tauXxYyZzD, Real3* tauXyXzYzD, Real3* vel_XSPH_D, Real4* derivVelRhoD, Real3* derivTauXxYyZzD, Real3* derivTauXyXzYzD, Real4* sr_tau_I_mu_iD, uint* activityIdentifierD, uint* freeSurfaceIdD, int2 updatePortion, Real dT, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; if (index >= updatePortion.y) return; uint activity = activityIdentifierD[index]; if (activity == 0) return; Real4 derivVelRho = derivVelRhoD[index]; Real4 rhoPresMu = rhoPresMuD[index]; Real h = posRadD[index].w; Real p_tr, p_n; if (rhoPresMu.w < 0) { // This is only implemented for granular material if (paramsD.elastic_SPH) { //-------------------------------- // ** total stress tau //-------------------------------- Real3 tauXxYyZz = tauXxYyZzD[index]; Real3 tauXyXzYz = tauXyXzYzD[index]; Real3 derivTauXxYyZz = derivTauXxYyZzD[index]; Real3 derivTauXyXzYz = derivTauXyXzYzD[index]; Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT; Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT; // check if there is a plastic flow p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z); tauXxYyZz.x += p_n; tauXxYyZz.y += p_n; tauXxYyZz.z += p_n; p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z); updatedTauXxYyZz.x += p_tr; updatedTauXxYyZz.y += p_tr; updatedTauXxYyZz.z += p_tr; Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) + 2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z); Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) + 2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z); tau_tr = sqrt(0.5 * tau_tr); tau_n = sqrt(0.5 * tau_n); Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT; // should use the positive magnitude according to "A // 
constitutive law for dense granular flows" Nature 2006 Real mu_s = paramsD.mu_fric_s; Real mu_2 = paramsD.mu_fric_2; // Real s_0 = mu_s * p_tr; // Real s_2 = mu_2 * p_tr; // Real xi = 1.1; Real dia = paramsD.ave_diam; Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);// Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) ); Real coh = paramsD.Coh_coeff; // Real Chi_cri = 0.1; // if (Chi < Chi_cri){ // coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0; // // coh = paramsD.Coh_coeff * (1.0 - I / I_cri); // } else { // coh = 0.0; // } Real inv_mus = 1.0 / paramsD.mu_fric_s; Real p_cri = - coh * inv_mus; if (p_tr > p_cri) { Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9); // Real G0 = paramsD.G_shear; // Real alpha = xi*G0*I0*(dT)*sqrt(p_tr); // Real B0 = s_2 + tau_tr + alpha; // Real H0 = s_2*tau_tr + s_0*alpha; // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9); // if(tau_tr>s_0){ // Real coeff = tau_n1/(tau_tr+1e-9); // updatedTauXxYyZz = updatedTauXxYyZz*coeff; // updatedTauXyXzYz = updatedTauXyXzYz*coeff; // } Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA; // should use tau_max instead of s_0 according to // "A constitutive law for dense granular flows" Nature 2006 if (tau_tr > tau_max) { Real coeff = tau_max / (tau_tr + 1e-9); updatedTauXxYyZz = updatedTauXxYyZz * coeff; updatedTauXyXzYz = updatedTauXyXzYz * coeff; } } // Set stress to zero if the pressure is smaller than the threshold if (p_tr < p_cri) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 0.0; // Real coeff = abs(p_cri / (p_tr + 1e-9)); // if (p_tr < 2.0 * p_cri){ // coeff = 0.0; // } else { // coeff = abs(1.0 - (p_tr - p_cri) / p_cri); // } // updatedTauXxYyZz = updatedTauXxYyZz * coeff; // updatedTauXyXzYz = updatedTauXyXzYz * coeff; // p_tr = p_cri * coeff; } // Set stress to zero if the particle is close to free surface if (freeSurfaceIdD[index] == 1) { updatedTauXxYyZz = mR3(0.0); updatedTauXyXzYz = mR3(0.0); p_tr = 
0.0; } if (paramsD.output_length == 2) { Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) + square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) + square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z)); tau_tr = sqrt(0.5 * tau_tr); sr_tau_I_mu_iD[index].y = tau_tr; } tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr); tauXyXzYzD[index] = updatedTauXyXzYz; } //------------- // ** position //------------- Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH * Real3 posRad = mR3(posRadD[index]); Real3 updatedPositon = posRad + vel_XSPH * dT; if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) { printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } posRadD[index] = mR4(updatedPositon, h); //------------- // ** velocity //------------- // Note that the velocity update should not use the XSPH contribution // It adds dissipation to the solution, and provides numerical damping Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index] Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT; velMasD[index] = updatedVelocity; //------------- // ** density //------------- if (paramsD.elastic_SPH) { // This is only implemented for granular material rhoPresMu.y = p_tr; rhoPresMu.x = paramsD.rho0; } else { Real rho2 = rhoPresMu.x + derivVelRho.w * dT; rhoPresMu.y = Eos(rho2, rhoPresMu.w); rhoPresMu.x = rho2; } if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) { printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n"); *isErrorD = true; return; } rhoPresMuD[index] = rhoPresMu; } // Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time // derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces. 
// calculate the force that is f=m dv/dt // derivVelRhoD[index] *= paramsD.markerMass; } //------------------------------------------------------------------------------ __global__ void Update_Fluid_State(Real3* new_vel, Real4* posRad, Real3* velMas, Real4* rhoPreMu, int4 updatePortion, double dT, volatile bool* isErrorD) { uint i_idx = blockIdx.x * blockDim.x + threadIdx.x; if (i_idx >= updatePortion.y) return; velMas[i_idx] = new_vel[i_idx]; Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx]; Real h = posRad[i_idx].w; posRad[i_idx] = mR4(newpos, h); if (!(isfinite(posRad[i_idx].x) && isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) { printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n", i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w); } if (!(isfinite(rhoPreMu[i_idx].x) && isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) { printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n", i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w); } if (!(isfinite(velMas[i_idx].x) && isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) { printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n", i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z); } } // ----------------------------------------------------------------------------- // Kernel for updating the density. // It calculates the density of the particle. It does include the normalization // close to the boundaries and free surface. 
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu, Real4* sortedPosRad, Real3* sortedVelMas, Real4* sortedRhoPreMu, uint* gridMarkerIndex, uint* cellStart, uint* cellEnd) { uint index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numObjectsD.numAllMarkers) return; // read particle data from sorted arrays Real3 posRadA = mR3(sortedPosRad[index]); Real4 rhoPreMuA = sortedRhoPreMu[index]; // get address in grid int3 gridPos = calcGridPos(posRadA); Real numerator = 0.0; Real denominator = 0.0; // examine neighbouring cells for (int z = -1; z <= 1; z++) { for (int y = -1; y <= 1; y++) { for (int x = -1; x <= 1; x++) { int3 neighbourPos = gridPos + mI3(x, y, z); collideCellDensityReInit(numerator, denominator, neighbourPos, index, posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd); } } } rhoPreMuA.x = numerator; // denominator; // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w); dummySortedRhoPreMu[index] = rhoPreMuA; } // ----------------------------------------------------------------------------- // Kernel for updating the activity of all particles. 
__global__ void UpdateActivityD(Real4* posRadD, Real3* velMasD, Real3* posRigidBodiesD, Real3* pos_fsi_fea_D, uint* activityIdentifierD, uint* extendedActivityIdD, int2 updatePortion, Real Time, volatile bool* isErrorD) { uint index = blockIdx.x * blockDim.x + threadIdx.x; index += updatePortion.x; if (index >= updatePortion.y) return; // Set the particle as an active particle activityIdentifierD[index] = 1; extendedActivityIdD[index] = 1; // If during the settling phase, all particles are active if (Time < paramsD.settlingTime) return; size_t numRigidBodies = numObjectsD.numRigidBodies; size_t numFlexNodes = numObjectsD.numFlexNodes; size_t numTotal = numRigidBodies + numFlexNodes; // Check the activity of this particle uint isNotActive = 0; uint isNotExtended = 0; Real3 Acdomain = paramsD.bodyActiveDomain; Real3 ExAcdomain = paramsD.bodyActiveDomain + mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML); Real3 posRadA = mR3(posRadD[index]); for (uint num = 0; num < numRigidBodies; num++) { Real3 detPos = posRadA - posRigidBodiesD[num]; if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y || abs(detPos.z) > Acdomain.z) isNotActive = isNotActive + 1; if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y || abs(detPos.z) > ExAcdomain.z) isNotExtended = isNotExtended + 1; } for (uint num = 0; num < numFlexNodes; num++) { Real3 detPos = posRadA - pos_fsi_fea_D[num]; if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y || abs(detPos.z) > Acdomain.z) isNotActive = isNotActive + 1; if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y || abs(detPos.z) > ExAcdomain.z) isNotExtended = isNotExtended + 1; } // Set the particle as an inactive particle if needed if (isNotActive == numTotal && numTotal > 0) { activityIdentifierD[index] = 0; velMasD[index] = mR3(0.0); } if (isNotExtended == numTotal && numTotal > 0) extendedActivityIdD[index] = 0; return; } // ----------------------------------------------------------------------------- // CLASS 
FOR FLUID DYNAMICS SYSTEM // ----------------------------------------------------------------------------- ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker, ChSystemFsi_impl& otherFsiSystem, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<ChCounters> otherNumObjects, TimeIntegrator type, bool verb) : fsiSystem(otherFsiSystem), paramsH(otherParamsH), numObjectsH(otherNumObjects), integrator_type(type), verbose(verb) { switch (integrator_type) { case TimeIntegrator::I2SPH: forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created an I2SPH framework" << endl; } break; case TimeIntegrator::IISPH: forceSystem = chrono_types::make_shared<ChFsiForceIISPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created an IISPH framework" << endl; } break; case TimeIntegrator::EXPLICITSPH: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); if (verbose) { cout << "====== Created a WCSPH framework" << endl; } break; // Extend this function with your own linear solvers default: forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>( otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD, fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb); cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl; } } // ----------------------------------------------------------------------------- ChFluidDynamics::~ChFluidDynamics() {} // ----------------------------------------------------------------------------- void ChFluidDynamics::Initialize() { forceSystem->Initialize(); 
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters)); cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams)); } // ----------------------------------------------------------------------------- void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real dT, Real Time) { if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) { this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time); forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD); } else forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD); if (integrator_type == TimeIntegrator::IISPH) this->UpdateFluid_Implicit(sphMarkersD2); else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) this->UpdateFluid(sphMarkersD1, dT); this->ApplyBoundarySPH_Markers(sphMarkersD2); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1, std::shared_ptr<SphMarkerDataD> sphMarkersD2, std::shared_ptr<FsiBodiesDataD> fsiBodiesD, std::shared_ptr<FsiMeshDataD> fsiMeshD, Real Time) { // Update portion of the SPH particles (should be all particles here) int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); //------------------------ uint numBlocks, numThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads); UpdateActivityD<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD), mR3CAST(fsiBodiesD->posRigid_fsiBodies_D), mR3CAST(fsiMeshD->pos_fsi_fea_D), 
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD), U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD), updatePortion, Time, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); //------------------------ cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! program crashed in UpdateActivityD!\n"); cudaFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) { // Update portion of the SPH particles (should be fluid particles only here) int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y); bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); //------------------------ uint numBlocks, numThreads; computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads); UpdateFluidD<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->tauXxYyZzD), mR3CAST(sphMarkersD->tauXyXzYzD), mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D), mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD), mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD), mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD), mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD), U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD), updatePortion, dT, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); //------------------------ cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! 
program crashed in UpdateFluidD!\n"); cudaFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numThreads, numBlocks; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0; int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0; int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x, fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0); cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl; bool *isErrorH, *isErrorD; isErrorH = (bool*)malloc(sizeof(bool)); cudaMalloc((void**)&isErrorD, sizeof(bool)); *isErrorH = false; cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice); Update_Fluid_State<<<numBlocks, numThreads>>>( mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D), mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD); cudaDeviceSynchronize(); cudaCheckError(); cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost); if (*isErrorH == true) throw std::runtime_error("Error! 
program crashed in Update_Fluid_State!\n"); cudaFree(isErrorD); free(isErrorH); } // ----------------------------------------------------------------------------- // Apply periodic boundary conditions in x, y, and z directions void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); ApplyPeriodicBoundaryXKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); // ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>> // (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD)); // cudaDeviceSynchronize(); // cudaCheckError(); } // ----------------------------------------------------------------------------- // Apply periodic boundary conditions in y, and z. // The inlet/outlet BC is applied in the x direction. // This functions needs to be tested. 
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); ApplyInletBoundaryXKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD), mR4CAST(sphMarkersD->rhoPresMuD)); cudaDeviceSynchronize(); cudaCheckError(); // these are useful anyway for out of bound particles ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>( mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD)); cudaDeviceSynchronize(); cudaCheckError(); } // ----------------------------------------------------------------------------- void ChFluidDynamics::DensityReinitialization() { uint numBlocks, numThreads; computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads); thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers); thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0)); ReCalcDensityD_F1<<<numBlocks, numThreads>>>( mR4CAST(dummySortedRhoPreMu), mR4CAST(fsiSystem.sortedSphMarkersD->posRadD), mR3CAST(fsiSystem.sortedSphMarkersD->velMasD), mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD), U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD), U1CAST(fsiSystem.markersProximityD->cellStartD), U1CAST(fsiSystem.markersProximityD->cellEndD)); cudaDeviceSynchronize(); cudaCheckError(); ChFsiForce::CopySortedToOriginal_NonInvasive_R4( fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu, fsiSystem.markersProximityD->gridMarkerIndexD); ChFsiForce::CopySortedToOriginal_NonInvasive_R4( fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu, 
fsiSystem.markersProximityD->gridMarkerIndexD); dummySortedRhoPreMu.clear(); } } // namespace fsi } // end namespace chrono
84676dbb297dcd1539837c9ccb7a8dee4236b2e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "scc.h" #include "scc_kernels.h" using namespace std; void wSlota(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){ //Set the device which exclusively used by this program hipSetDevice(7); float sccTime=0; hipEvent_t sccTimeStart, sccTimeStop; hipEventCreate(&sccTimeStart); hipEventCreate(&sccTimeStop); hipEventRecord(sccTimeStart, 0); //-----------GPU initialization----------------------------> uint32_t* d_Fr = NULL; uint32_t* d_Br = NULL; uint32_t* d_Fc = NULL; uint32_t* d_Bc = NULL; uint32_t* d_pivots = NULL; uint32_t* d_range = NULL; uint8_t* d_tags = NULL; uint8_t* tags = new uint8_t[RSize+1]; bool volatile* d_terminatef = NULL; bool terminatef = false; bool volatile* d_terminateb = NULL; bool terminateb = false; int FWD_iterations = 0; int BWD_iterations = 0; uint32_t iterations = 0; const uint32_t max_pivot_count = 1; hipError_t e1, e2, e3, e4, e5, e6, e7, e8, e9; CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t))); CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) )); CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminatef, sizeof(bool) )); CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_terminateb, sizeof(bool) )); if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation || e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation || e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation || e7 == 
hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation || e9 == hipErrorMemoryAllocation) { throw "Error: Not enough memory on GPU\n"; } CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t))); dim3 gridfb; if((RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize * warpSize / BLOCKSIZE)); gridfb.x = dim; gridfb.y = dim; gridfb.z = 1; }else{ gridfb.x = (RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE; gridfb.y = 1; gridfb.z = 1; } //for vertex-to-thread mapping dim3 grid; if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize / BLOCKSIZE)); grid.x = dim; grid.y = dim; grid.z = 1; }else{ grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE; grid.y = 1; grid.z = 1; } dim3 threads(BLOCKSIZE, 1, 1); #ifdef _DEBUG float pivotTime = 0, temp = 0, bTime = 0, pTime = 0, trim1Time = 0, updateTime = 0, bfsTime = 0; hipEvent_t bTimeStart, bTimeStop, pTimeStart, pTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop; hipEvent_t trim1TimeStart, trim1TimeStop, bfsTimeStart, bfsTimeStop; hipEventCreate(&bTimeStart); hipEventCreate(&bTimeStop); hipEventCreate(&pTimeStart); hipEventCreate(&pTimeStop); hipEventCreate(&pivotTimeStart); hipEventCreate(&pivotTimeStop); hipEventCreate(&trim1TimeStart); hipEventCreate(&trim1TimeStop); hipEventCreate(&updateTimeStart); hipEventCreate(&updateTimeStop); hipEventCreate(&bfsTimeStart); hipEventCreate(&bfsTimeStop); #endif #ifdef _DEBUG hipEventRecord(trim1TimeStart, 0); #endif 
//-----------Trimming--------------------------------------> unsigned shm = sizeof(unsigned) * sz; if(t1) hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads),shm, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef); #ifdef _DEBUG hipEventRecord(trim1TimeStop, 0); hipEventSynchronize(trim1TimeStop); hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop); trim1Time+=temp; #endif //-----------Choose pivots---------------------------------> #ifdef _DEBUG hipEventRecord(pivotTimeStart, 0); #endif CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) )); hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br); hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots); #ifdef _DEBUG hipEventRecord(pivotTimeStop, 0); hipEventSynchronize(pivotTimeStop); hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop); pivotTime+=temp; #endif #ifdef _DEBUG hipEventRecord(bfsTimeStart, 0); #endif do{//Forward and Backward reachability FWD_iterations++; BWD_iterations++; CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) )); switch(warpSize){ case 1: hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 2: hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 4: hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, 
d_terminateb); break; case 8: hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 16: hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 32: hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; } CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef && !terminateb); while(!terminatef){//Forward reachability FWD_iterations++; CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); switch(warpSize){ case 1: hipLaunchKernelGGL(( fwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 2: hipLaunchKernelGGL(( fwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 4: hipLaunchKernelGGL(( fwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 8: hipLaunchKernelGGL(( fwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 16: hipLaunchKernelGGL(( fwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 32: hipLaunchKernelGGL(( fwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, 
RSize, d_terminatef); break; } CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); } while(!terminateb){//Backward reachability BWD_iterations++; CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) )); switch(warpSize){ case 1: hipLaunchKernelGGL(( bwd_warp<1>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 2: hipLaunchKernelGGL(( bwd_warp<2>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 4: hipLaunchKernelGGL(( bwd_warp<4>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 8: hipLaunchKernelGGL(( bwd_warp<8>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 16: hipLaunchKernelGGL(( bwd_warp<16>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 32: hipLaunchKernelGGL(( bwd_warp<32>), dim3(gridfb), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; } CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost )); } #ifdef _DEBUG hipEventRecord(bfsTimeStop, 0); hipEventSynchronize(bfsTimeStop); hipEventElapsedTime(&temp, bfsTimeStart, bfsTimeStop); bfsTime+=temp; #endif #ifdef _DEBUG hipEventRecord(updateTimeStart, 0); #endif hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef); #ifdef _DEBUG hipEventRecord(updateTimeStop, 0); hipEventSynchronize(updateTimeStop); hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop); updateTime+=temp; #endif //-----------Main algorithm--------------------------------> while ( true ) { iterations++; //cout<<"\nIteration : "<<iterations<<endl; #ifdef _DEBUG hipEventRecord(pTimeStart, 0); #endif hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize); 
do{ CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( colorPropagation), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef); #ifdef _DEBUG hipEventRecord(pTimeStop, 0); hipEventSynchronize(pTimeStop); hipEventElapsedTime(&temp, pTimeStart, pTimeStop); pTime+=temp; #endif #ifdef _DEBUG hipEventRecord(bTimeStart, 0); #endif hipLaunchKernelGGL(( selectPivotColoring), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize); do{//Forward reachability CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( fwdColoring), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef); CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( updateColoring), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); if (terminatef) break; //only way out #ifdef _DEBUG hipEventRecord(bTimeStop, 0); hipEventSynchronize(bTimeStop); hipEventElapsedTime(&temp, bTimeStart, bTimeStop); bTime+=temp; #endif } //<----------Main algorithm--------------------------------- //SCC extraction CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost )); uint32_t numberOf1Sccs = 0; uint32_t numberOfPivotSccs = 0; uint32_t numberOfSccs = 0; for(uint32_t i=1;i<=RSize;i++) if(isTrim1(tags[i])) numberOf1Sccs++; else if(isPivot(tags[i])) numberOfPivotSccs++; numberOfSccs = numberOf1Sccs + numberOfPivotSccs; hipEventRecord(sccTimeStop, 0); hipEventSynchronize(sccTimeStop); hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop); 
//printf(", %u, %d, %d", iterations, FWD_iterations , BWD_iterations); #ifdef _DEBUG printf(", %f", bfsTime); printf(", %f", pTime); printf(", %f", bTime); printf(", %f", trim1Time); printf(", %f", pivotTime); printf(", %f", updateTime); #endif printf("\nNumber Of Sccs : %d", numberOfSccs); printf("\nTime : %f", sccTime ); CUDA_SAFE_CALL( hipFree( d_Fc )); CUDA_SAFE_CALL( hipFree( d_Fr )); CUDA_SAFE_CALL( hipFree( d_Bc )); CUDA_SAFE_CALL( hipFree( d_Br )); CUDA_SAFE_CALL( hipFree( d_range)); CUDA_SAFE_CALL( hipFree( d_tags)); CUDA_SAFE_CALL( hipFree( d_pivots )); CUDA_SAFE_CALL( hipFree( (void *)d_terminatef)); CUDA_SAFE_CALL( hipFree( (void *)d_terminateb)); hipEventDestroy(sccTimeStart); hipEventDestroy(sccTimeStop); #ifdef _DEBUG hipEventDestroy(bTimeStart); hipEventDestroy(bTimeStop); hipEventDestroy(pTimeStart); hipEventDestroy(pTimeStop); hipEventDestroy(trim1TimeStart); hipEventDestroy(trim1TimeStop); hipEventDestroy(bfsTimeStart); hipEventDestroy(bfsTimeStop); hipEventDestroy(pivotTimeStart); hipEventDestroy(pivotTimeStop); hipEventDestroy(updateTimeStart); hipEventDestroy(updateTimeStop); #endif return; } void vSlota(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){ //Set the device which exclusively used by this program hipSetDevice(7); float sccTime=0; hipEvent_t sccTimeStart, sccTimeStop; hipEventCreate(&sccTimeStart); hipEventCreate(&sccTimeStop); hipEventRecord(sccTimeStart, 0); //-----------GPU initialization----------------------------> uint32_t* d_Fr = NULL; uint32_t* d_Br = NULL; uint32_t* d_Fc = NULL; uint32_t* d_Bc = NULL; uint32_t* d_pivots = NULL; uint32_t* d_range = NULL; uint8_t* d_tags = NULL; uint8_t* tags = new uint8_t[RSize+1]; bool volatile* d_terminatef = NULL; bool terminatef = false; bool volatile* d_terminateb = NULL; bool terminateb = false; int FWD_iterations = 0; int BWD_iterations = 0; uint32_t iterations = 0; const uint32_t max_pivot_count = 1; hipError_t e1, e2, e3, 
e4, e5, e6, e7, e8, e9; CUDA_SAFE_CALL( e1 = hipMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e2 = hipMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e3 = hipMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e4 = hipMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e5 = hipMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( e6 = hipMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t))); CUDA_SAFE_CALL( e7 = hipMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) )); CUDA_SAFE_CALL( e8 = hipMalloc( (void**) &d_terminatef, sizeof(bool) )); CUDA_SAFE_CALL( e9 = hipMalloc( (void**) &d_terminateb, sizeof(bool) )); if (e1 == hipErrorMemoryAllocation || e2 == hipErrorMemoryAllocation || e3 == hipErrorMemoryAllocation || e4 == hipErrorMemoryAllocation || e5 == hipErrorMemoryAllocation || e6 == hipErrorMemoryAllocation || e7 == hipErrorMemoryAllocation || e8 == hipErrorMemoryAllocation || e9 == hipErrorMemoryAllocation) { throw "Error: Not enough memory on GPU\n"; } CUDA_SAFE_CALL( hipMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( hipMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t))); //for vertex-to-thread mapping dim3 grid; if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize / BLOCKSIZE)); grid.x = dim; grid.y = dim; grid.z = 1; }else{ grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE; grid.y = 1; grid.z = 1; } dim3 threads(BLOCKSIZE, 1, 1); #ifdef _DEBUG float pivotTime = 0, temp = 0, pTime = 0, bTime = 0, trim1Time = 0, updateTime = 
0, bfsTime = 0; hipEvent_t pTimeStart, pTimeStop, bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop; hipEvent_t trim1TimeStart, trim1TimeStop, bfsTimeStart, bfsTimeStop; hipEventCreate(&pTimeStart); hipEventCreate(&pTimeStop); hipEventCreate(&bTimeStart); hipEventCreate(&bTimeStop); hipEventCreate(&pivotTimeStart); hipEventCreate(&pivotTimeStop); hipEventCreate(&trim1TimeStart); hipEventCreate(&trim1TimeStop); hipEventCreate(&updateTimeStart); hipEventCreate(&updateTimeStop); hipEventCreate(&bfsTimeStart); hipEventCreate(&bfsTimeStop); #endif #ifdef _DEBUG hipEventRecord(trim1TimeStart, 0); #endif //-----------Trimming--------------------------------------> if(t1) hipLaunchKernelGGL(( trim1), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef); #ifdef _DEBUG hipEventRecord(trim1TimeStop, 0); hipEventSynchronize(trim1TimeStop); hipEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop); trim1Time+=temp; #endif //-----------Choose pivots---------------------------------> #ifdef _DEBUG hipEventRecord(pivotTimeStart, 0); #endif CUDA_SAFE_CALL( hipMemset( d_pivots, 0, sizeof(uint32_t) )); hipLaunchKernelGGL(( pollForFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots, d_Fr, d_Br); hipLaunchKernelGGL(( selectFirstPivot), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_pivots); #ifdef _DEBUG hipEventRecord(pivotTimeStop, 0); hipEventSynchronize(pivotTimeStop); hipEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop); pivotTime+=temp; #endif #ifdef _DEBUG hipEventRecord(bfsTimeStart, 0); #endif do{//Forward and Backward reachability FWD_iterations++; BWD_iterations++; CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) )); hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, 
d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef && !terminateb); while(!terminatef){//Forward reachability FWD_iterations++; CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( fwd), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); } while(!terminateb){//Backward reachability BWD_iterations++; CUDA_SAFE_CALL( hipMemset((void *)d_terminateb, true, sizeof(bool) )); hipLaunchKernelGGL(( bwd), dim3(grid), dim3(threads), 0, 0, d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); CUDA_SAFE_CALL( hipMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), hipMemcpyDeviceToHost )); } #ifdef _DEBUG hipEventRecord(bfsTimeStop, 0); hipEventSynchronize(bfsTimeStop); hipEventElapsedTime(&temp, bfsTimeStart, bfsTimeStop); bfsTime+=temp; #endif #ifdef _DEBUG hipEventRecord(updateTimeStart, 0); #endif hipLaunchKernelGGL(( update), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize, d_terminatef); #ifdef _DEBUG hipEventRecord(updateTimeStop, 0); hipEventSynchronize(updateTimeStop); hipEventElapsedTime(&temp, updateTimeStart, updateTimeStop); updateTime+=temp; #endif //-----------Main algorithm--------------------------------> while ( true ) { iterations++; //cout<<"\nIteration : "<<iterations<<endl; #ifdef _DEBUG hipEventRecord(pTimeStart, 0); #endif hipLaunchKernelGGL(( assignUniqueRange), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize); do{ CUDA_SAFE_CALL( hipMemset((void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( colorPropagation), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( 
hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef); #ifdef _DEBUG hipEventRecord(pTimeStop, 0); hipEventSynchronize(pTimeStop); hipEventElapsedTime(&temp, pTimeStart, pTimeStop); pTime+=temp; #endif #ifdef _DEBUG hipEventRecord(bTimeStart, 0); #endif hipLaunchKernelGGL(( selectPivotColoring), dim3(grid), dim3(threads), 0, 0, d_range, d_tags, RSize); do{//Forward reachability CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( fwdColoring), dim3(grid), dim3(threads), 0, 0, d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); }while(!terminatef); CUDA_SAFE_CALL( hipMemset( (void *)d_terminatef, true, sizeof(bool) )); hipLaunchKernelGGL(( updateColoring), dim3(grid), dim3(threads), 0, 0, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( hipMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), hipMemcpyDeviceToHost )); if (terminatef) break; //only way out #ifdef _DEBUG hipEventRecord(bTimeStop, 0); hipEventSynchronize(bTimeStop); hipEventElapsedTime(&temp, bTimeStart, bTimeStop); bTime+=temp; #endif } //<----------Main algorithm--------------------------------- //SCC extraction CUDA_SAFE_CALL( hipMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), hipMemcpyDeviceToHost )); uint32_t numberOf1Sccs = 0; uint32_t numberOfPivotSccs = 0; uint32_t numberOfSccs = 0; for(uint32_t i=1;i<=RSize;i++) if(isTrim1(tags[i])) numberOf1Sccs++; else if(isPivot(tags[i])) numberOfPivotSccs++; numberOfSccs = numberOf1Sccs + numberOfPivotSccs; hipEventRecord(sccTimeStop, 0); hipEventSynchronize(sccTimeStop); hipEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop); //printf(", %u, %d, %d", iterations, FWD_iterations , BWD_iterations); #ifdef _DEBUG printf(", %f", bfsTime); printf(", %f", pTime); printf(", %f", bTime); printf(", %f", trim1Time); printf(", %f", pivotTime); printf(", 
%f", updateTime); #endif printf("\nNumber Of Sccs : %d", numberOfSccs); printf("\nTime : %f", sccTime ); CUDA_SAFE_CALL( hipFree( d_Fc )); CUDA_SAFE_CALL( hipFree( d_Fr )); CUDA_SAFE_CALL( hipFree( d_Bc )); CUDA_SAFE_CALL( hipFree( d_Br )); CUDA_SAFE_CALL( hipFree( d_range)); CUDA_SAFE_CALL( hipFree( d_tags)); CUDA_SAFE_CALL( hipFree( d_pivots )); CUDA_SAFE_CALL( hipFree( (void *)d_terminatef)); CUDA_SAFE_CALL( hipFree( (void *)d_terminateb)); hipEventDestroy(sccTimeStart); hipEventDestroy(sccTimeStop); #ifdef _DEBUG hipEventDestroy(bTimeStart); hipEventDestroy(bTimeStop); hipEventDestroy(pTimeStart); hipEventDestroy(pTimeStop); hipEventDestroy(trim1TimeStart); hipEventDestroy(trim1TimeStop); hipEventDestroy(pivotTimeStart); hipEventDestroy(pivotTimeStop); hipEventDestroy(updateTimeStart); hipEventDestroy(updateTimeStop); hipEventDestroy(bfsTimeStart); hipEventDestroy(bfsTimeStop); #endif return; }
84676dbb297dcd1539837c9ccb7a8dee4236b2e0.cu
#include "scc.h" #include "scc_kernels.h" using namespace std; void wSlota(uint32_t CSize, uint32_t RSize, uint32_t *Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2, int warpSize){ //Set the device which exclusively used by this program cudaSetDevice(7); float sccTime=0; cudaEvent_t sccTimeStart, sccTimeStop; cudaEventCreate(&sccTimeStart); cudaEventCreate(&sccTimeStop); cudaEventRecord(sccTimeStart, 0); //-----------GPU initialization----------------------------> uint32_t* d_Fr = NULL; uint32_t* d_Br = NULL; uint32_t* d_Fc = NULL; uint32_t* d_Bc = NULL; uint32_t* d_pivots = NULL; uint32_t* d_range = NULL; uint8_t* d_tags = NULL; uint8_t* tags = new uint8_t[RSize+1]; bool volatile* d_terminatef = NULL; bool terminatef = false; bool volatile* d_terminateb = NULL; bool terminateb = false; int FWD_iterations = 0; int BWD_iterations = 0; uint32_t iterations = 0; const uint32_t max_pivot_count = 1; cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9; CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t))); CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) )); CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) )); CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) )); if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation || e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation || e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation || e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation || e9 == 
cudaErrorMemoryAllocation) { throw "Error: Not enough memory on GPU\n"; } CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t))); dim3 gridfb; if((RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize * warpSize / BLOCKSIZE)); gridfb.x = dim; gridfb.y = dim; gridfb.z = 1; }else{ gridfb.x = (RSize * warpSize + BLOCKSIZE - 1)/BLOCKSIZE; gridfb.y = 1; gridfb.z = 1; } //for vertex-to-thread mapping dim3 grid; if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize / BLOCKSIZE)); grid.x = dim; grid.y = dim; grid.z = 1; }else{ grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE; grid.y = 1; grid.z = 1; } dim3 threads(BLOCKSIZE, 1, 1); #ifdef _DEBUG float pivotTime = 0, temp = 0, bTime = 0, pTime = 0, trim1Time = 0, updateTime = 0, bfsTime = 0; cudaEvent_t bTimeStart, bTimeStop, pTimeStart, pTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop; cudaEvent_t trim1TimeStart, trim1TimeStop, bfsTimeStart, bfsTimeStop; cudaEventCreate(&bTimeStart); cudaEventCreate(&bTimeStop); cudaEventCreate(&pTimeStart); cudaEventCreate(&pTimeStop); cudaEventCreate(&pivotTimeStart); cudaEventCreate(&pivotTimeStop); cudaEventCreate(&trim1TimeStart); cudaEventCreate(&trim1TimeStop); cudaEventCreate(&updateTimeStart); cudaEventCreate(&updateTimeStop); cudaEventCreate(&bfsTimeStart); cudaEventCreate(&bfsTimeStop); #endif #ifdef _DEBUG cudaEventRecord(trim1TimeStart, 0); #endif //-----------Trimming--------------------------------------> unsigned shm = 
sizeof(unsigned) * sz; if(t1) trim1<<<grid, threads,shm>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef); #ifdef _DEBUG cudaEventRecord(trim1TimeStop, 0); cudaEventSynchronize(trim1TimeStop); cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop); trim1Time+=temp; #endif //-----------Choose pivots---------------------------------> #ifdef _DEBUG cudaEventRecord(pivotTimeStart, 0); #endif CUDA_SAFE_CALL( cudaMemset( d_pivots, 0, sizeof(uint32_t) )); pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br); selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots); #ifdef _DEBUG cudaEventRecord(pivotTimeStop, 0); cudaEventSynchronize(pivotTimeStop); cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop); pivotTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(bfsTimeStart, 0); #endif do{//Forward and Backward reachability FWD_iterations++; BWD_iterations++; CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) )); switch(warpSize){ case 1: fwd_warp<1><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<1><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 2: fwd_warp<2><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<2><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 4: fwd_warp<4><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<4><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 8: fwd_warp<8><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<8><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 16: fwd_warp<16><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<16><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); 
break; case 32: fwd_warp<32><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd_warp<32><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; } CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef && !terminateb); while(!terminatef){//Forward reachability FWD_iterations++; CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); switch(warpSize){ case 1: fwd_warp<1><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 2: fwd_warp<2><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 4: fwd_warp<4><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 8: fwd_warp<8><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 16: fwd_warp<16><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; case 32: fwd_warp<32><<<gridfb, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); break; } CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); } while(!terminateb){//Backward reachability BWD_iterations++; CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) )); switch(warpSize){ case 1: bwd_warp<1><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 2: bwd_warp<2><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 4: bwd_warp<4><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 8: bwd_warp<8><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 16: bwd_warp<16><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; case 32: 
bwd_warp<32><<<gridfb, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); break; } CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost )); } #ifdef _DEBUG cudaEventRecord(bfsTimeStop, 0); cudaEventSynchronize(bfsTimeStop); cudaEventElapsedTime(&temp, bfsTimeStart, bfsTimeStop); bfsTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(updateTimeStart, 0); #endif update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef); #ifdef _DEBUG cudaEventRecord(updateTimeStop, 0); cudaEventSynchronize(updateTimeStop); cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop); updateTime+=temp; #endif //-----------Main algorithm--------------------------------> while ( true ) { iterations++; //cout<<"\nIteration : "<<iterations<<endl; #ifdef _DEBUG cudaEventRecord(pTimeStart, 0); #endif assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize); do{ CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) )); colorPropagation<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef); #ifdef _DEBUG cudaEventRecord(pTimeStop, 0); cudaEventSynchronize(pTimeStop); cudaEventElapsedTime(&temp, pTimeStart, pTimeStop); pTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(bTimeStart, 0); #endif selectPivotColoring<<<grid, threads>>>(d_range, d_tags, RSize); do{//Forward reachability CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); fwdColoring<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef); CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); updateColoring<<<grid, threads>>>(d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, 
sizeof(bool), cudaMemcpyDeviceToHost )); if (terminatef) break; //only way out #ifdef _DEBUG cudaEventRecord(bTimeStop, 0); cudaEventSynchronize(bTimeStop); cudaEventElapsedTime(&temp, bTimeStart, bTimeStop); bTime+=temp; #endif } //<----------Main algorithm--------------------------------- //SCC extraction CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost )); uint32_t numberOf1Sccs = 0; uint32_t numberOfPivotSccs = 0; uint32_t numberOfSccs = 0; for(uint32_t i=1;i<=RSize;i++) if(isTrim1(tags[i])) numberOf1Sccs++; else if(isPivot(tags[i])) numberOfPivotSccs++; numberOfSccs = numberOf1Sccs + numberOfPivotSccs; cudaEventRecord(sccTimeStop, 0); cudaEventSynchronize(sccTimeStop); cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop); //printf(", %u, %d, %d", iterations, FWD_iterations , BWD_iterations); #ifdef _DEBUG printf(", %f", bfsTime); printf(", %f", pTime); printf(", %f", bTime); printf(", %f", trim1Time); printf(", %f", pivotTime); printf(", %f", updateTime); #endif printf("\nNumber Of Sccs : %d", numberOfSccs); printf("\nTime : %f", sccTime ); CUDA_SAFE_CALL( cudaFree( d_Fc )); CUDA_SAFE_CALL( cudaFree( d_Fr )); CUDA_SAFE_CALL( cudaFree( d_Bc )); CUDA_SAFE_CALL( cudaFree( d_Br )); CUDA_SAFE_CALL( cudaFree( d_range)); CUDA_SAFE_CALL( cudaFree( d_tags)); CUDA_SAFE_CALL( cudaFree( d_pivots )); CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef)); CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb)); cudaEventDestroy(sccTimeStart); cudaEventDestroy(sccTimeStop); #ifdef _DEBUG cudaEventDestroy(bTimeStart); cudaEventDestroy(bTimeStop); cudaEventDestroy(pTimeStart); cudaEventDestroy(pTimeStop); cudaEventDestroy(trim1TimeStart); cudaEventDestroy(trim1TimeStop); cudaEventDestroy(bfsTimeStart); cudaEventDestroy(bfsTimeStop); cudaEventDestroy(pivotTimeStart); cudaEventDestroy(pivotTimeStop); cudaEventDestroy(updateTimeStart); cudaEventDestroy(updateTimeStop); #endif return; } void vSlota(uint32_t CSize, uint32_t RSize, uint32_t 
*Fc, uint32_t *Fr, uint32_t * Bc, uint32_t * Br, bool t1, bool t2){ //Set the device which exclusively used by this program cudaSetDevice(7); float sccTime=0; cudaEvent_t sccTimeStart, sccTimeStop; cudaEventCreate(&sccTimeStart); cudaEventCreate(&sccTimeStop); cudaEventRecord(sccTimeStart, 0); //-----------GPU initialization----------------------------> uint32_t* d_Fr = NULL; uint32_t* d_Br = NULL; uint32_t* d_Fc = NULL; uint32_t* d_Bc = NULL; uint32_t* d_pivots = NULL; uint32_t* d_range = NULL; uint8_t* d_tags = NULL; uint8_t* tags = new uint8_t[RSize+1]; bool volatile* d_terminatef = NULL; bool terminatef = false; bool volatile* d_terminateb = NULL; bool terminateb = false; int FWD_iterations = 0; int BWD_iterations = 0; uint32_t iterations = 0; const uint32_t max_pivot_count = 1; cudaError_t e1, e2, e3, e4, e5, e6, e7, e8, e9; CUDA_SAFE_CALL( e1 = cudaMalloc( (void**) &d_Fc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e2 = cudaMalloc( (void**) &d_Fr, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e3 = cudaMalloc( (void**) &d_Bc, CSize * sizeof(uint32_t) )); CUDA_SAFE_CALL( e4 = cudaMalloc( (void**) &d_Br, (RSize + 2) * sizeof(uint32_t) )); CUDA_SAFE_CALL( e5 = cudaMalloc( (void**) &d_range, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( e6 = cudaMalloc( (void**) &d_tags, (RSize + 1) * sizeof(uint8_t))); CUDA_SAFE_CALL( e7 = cudaMalloc( (void**) &d_pivots, max_pivot_count * sizeof(uint32_t) )); CUDA_SAFE_CALL( e8 = cudaMalloc( (void**) &d_terminatef, sizeof(bool) )); CUDA_SAFE_CALL( e9 = cudaMalloc( (void**) &d_terminateb, sizeof(bool) )); if (e1 == cudaErrorMemoryAllocation || e2 == cudaErrorMemoryAllocation || e3 == cudaErrorMemoryAllocation || e4 == cudaErrorMemoryAllocation || e5 == cudaErrorMemoryAllocation || e6 == cudaErrorMemoryAllocation || e7 == cudaErrorMemoryAllocation || e8 == cudaErrorMemoryAllocation || e9 == cudaErrorMemoryAllocation) { throw "Error: Not enough memory on GPU\n"; } CUDA_SAFE_CALL( cudaMemcpy( d_Fc, Fc, CSize * 
sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Fr, Fr, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Bc, Bc, CSize * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy( d_Br, Br, (RSize + 2) * sizeof(uint32_t), cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemset( d_range, 0, (RSize + 1) * sizeof(uint32_t))); CUDA_SAFE_CALL( cudaMemset( d_tags, 0, (RSize + 1) * sizeof(uint8_t))); //for vertex-to-thread mapping dim3 grid; if((RSize + BLOCKSIZE - 1)/BLOCKSIZE > MaxXDimOfGrid) { int dim = ceill(sqrt(RSize / BLOCKSIZE)); grid.x = dim; grid.y = dim; grid.z = 1; }else{ grid.x = (RSize + BLOCKSIZE - 1)/BLOCKSIZE; grid.y = 1; grid.z = 1; } dim3 threads(BLOCKSIZE, 1, 1); #ifdef _DEBUG float pivotTime = 0, temp = 0, pTime = 0, bTime = 0, trim1Time = 0, updateTime = 0, bfsTime = 0; cudaEvent_t pTimeStart, pTimeStop, bTimeStart, bTimeStop, pivotTimeStart, pivotTimeStop, updateTimeStart, updateTimeStop; cudaEvent_t trim1TimeStart, trim1TimeStop, bfsTimeStart, bfsTimeStop; cudaEventCreate(&pTimeStart); cudaEventCreate(&pTimeStop); cudaEventCreate(&bTimeStart); cudaEventCreate(&bTimeStop); cudaEventCreate(&pivotTimeStart); cudaEventCreate(&pivotTimeStop); cudaEventCreate(&trim1TimeStart); cudaEventCreate(&trim1TimeStop); cudaEventCreate(&updateTimeStart); cudaEventCreate(&updateTimeStop); cudaEventCreate(&bfsTimeStart); cudaEventCreate(&bfsTimeStop); #endif #ifdef _DEBUG cudaEventRecord(trim1TimeStart, 0); #endif //-----------Trimming--------------------------------------> if(t1) trim1<<<grid, threads>>>( d_range, d_tags, d_Fc, d_Fr, d_Bc, d_Br, RSize, d_terminatef); #ifdef _DEBUG cudaEventRecord(trim1TimeStop, 0); cudaEventSynchronize(trim1TimeStop); cudaEventElapsedTime(&temp, trim1TimeStart, trim1TimeStop); trim1Time+=temp; #endif //-----------Choose pivots---------------------------------> #ifdef _DEBUG cudaEventRecord(pivotTimeStart, 0); #endif CUDA_SAFE_CALL( cudaMemset( d_pivots, 
0, sizeof(uint32_t) )); pollForFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots, d_Fr, d_Br); selectFirstPivot<<<grid, threads>>>( d_tags, RSize, d_pivots); #ifdef _DEBUG cudaEventRecord(pivotTimeStop, 0); cudaEventSynchronize(pivotTimeStop); cudaEventElapsedTime(&temp, pivotTimeStart, pivotTimeStop); pivotTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(bfsTimeStart, 0); #endif do{//Forward and Backward reachability FWD_iterations++; BWD_iterations++; CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) )); fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef && !terminateb); while(!terminatef){//Forward reachability FWD_iterations++; CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); fwd<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); } while(!terminateb){//Backward reachability BWD_iterations++; CUDA_SAFE_CALL( cudaMemset((void *)d_terminateb, true, sizeof(bool) )); bwd<<<grid, threads>>>( d_Bc, d_Br, d_range, d_tags, RSize, d_terminateb); CUDA_SAFE_CALL( cudaMemcpy( &terminateb, (const void *)d_terminateb, sizeof(bool), cudaMemcpyDeviceToHost )); } #ifdef _DEBUG cudaEventRecord(bfsTimeStop, 0); cudaEventSynchronize(bfsTimeStop); cudaEventElapsedTime(&temp, bfsTimeStart, bfsTimeStop); bfsTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(updateTimeStart, 0); #endif update<<<grid, threads>>>(d_range, d_tags, RSize, d_terminatef); #ifdef _DEBUG cudaEventRecord(updateTimeStop, 0); 
cudaEventSynchronize(updateTimeStop); cudaEventElapsedTime(&temp, updateTimeStart, updateTimeStop); updateTime+=temp; #endif //-----------Main algorithm--------------------------------> while ( true ) { iterations++; //cout<<"\nIteration : "<<iterations<<endl; #ifdef _DEBUG cudaEventRecord(pTimeStart, 0); #endif assignUniqueRange<<<grid, threads>>>(d_range, d_tags, RSize); do{ CUDA_SAFE_CALL( cudaMemset((void *)d_terminatef, true, sizeof(bool) )); colorPropagation<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef); #ifdef _DEBUG cudaEventRecord(pTimeStop, 0); cudaEventSynchronize(pTimeStop); cudaEventElapsedTime(&temp, pTimeStart, pTimeStop); pTime+=temp; #endif #ifdef _DEBUG cudaEventRecord(bTimeStart, 0); #endif selectPivotColoring<<<grid, threads>>>(d_range, d_tags, RSize); do{//Forward reachability CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); fwdColoring<<<grid, threads>>>( d_Fc, d_Fr, d_range, d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); }while(!terminatef); CUDA_SAFE_CALL( cudaMemset( (void *)d_terminatef, true, sizeof(bool) )); updateColoring<<<grid, threads>>>(d_tags, RSize, d_terminatef); CUDA_SAFE_CALL( cudaMemcpy( &terminatef, (const void *)d_terminatef, sizeof(bool), cudaMemcpyDeviceToHost )); if (terminatef) break; //only way out #ifdef _DEBUG cudaEventRecord(bTimeStop, 0); cudaEventSynchronize(bTimeStop); cudaEventElapsedTime(&temp, bTimeStart, bTimeStop); bTime+=temp; #endif } //<----------Main algorithm--------------------------------- //SCC extraction CUDA_SAFE_CALL( cudaMemcpy(tags, d_tags, sizeof(uint8_t) * (RSize + 1), cudaMemcpyDeviceToHost )); uint32_t numberOf1Sccs = 0; uint32_t numberOfPivotSccs = 0; uint32_t numberOfSccs = 0; for(uint32_t i=1;i<=RSize;i++) 
if(isTrim1(tags[i])) numberOf1Sccs++; else if(isPivot(tags[i])) numberOfPivotSccs++; numberOfSccs = numberOf1Sccs + numberOfPivotSccs; cudaEventRecord(sccTimeStop, 0); cudaEventSynchronize(sccTimeStop); cudaEventElapsedTime(&sccTime, sccTimeStart, sccTimeStop); //printf(", %u, %d, %d", iterations, FWD_iterations , BWD_iterations); #ifdef _DEBUG printf(", %f", bfsTime); printf(", %f", pTime); printf(", %f", bTime); printf(", %f", trim1Time); printf(", %f", pivotTime); printf(", %f", updateTime); #endif printf("\nNumber Of Sccs : %d", numberOfSccs); printf("\nTime : %f", sccTime ); CUDA_SAFE_CALL( cudaFree( d_Fc )); CUDA_SAFE_CALL( cudaFree( d_Fr )); CUDA_SAFE_CALL( cudaFree( d_Bc )); CUDA_SAFE_CALL( cudaFree( d_Br )); CUDA_SAFE_CALL( cudaFree( d_range)); CUDA_SAFE_CALL( cudaFree( d_tags)); CUDA_SAFE_CALL( cudaFree( d_pivots )); CUDA_SAFE_CALL( cudaFree( (void *)d_terminatef)); CUDA_SAFE_CALL( cudaFree( (void *)d_terminateb)); cudaEventDestroy(sccTimeStart); cudaEventDestroy(sccTimeStop); #ifdef _DEBUG cudaEventDestroy(bTimeStart); cudaEventDestroy(bTimeStop); cudaEventDestroy(pTimeStart); cudaEventDestroy(pTimeStop); cudaEventDestroy(trim1TimeStart); cudaEventDestroy(trim1TimeStop); cudaEventDestroy(pivotTimeStart); cudaEventDestroy(pivotTimeStop); cudaEventDestroy(updateTimeStart); cudaEventDestroy(updateTimeStop); cudaEventDestroy(bfsTimeStart); cudaEventDestroy(bfsTimeStop); #endif return; }
0c79ff66993483fa9fd7a162193b3f00ebd14ee3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
#include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <device_launch_parameters.h> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/cuda_utils.h> #include <utils/graphs/traversal.h> #include <utils/balancer.h> #include "sssp_common.h" #define GTID (blockIdx.x * blockDim.x + threadIdx.x) #define FILTER_THRESHOLD 0.0000000001 DECLARE_double(wl_alloc_factor_local); DECLARE_int32(source_node); DECLARE_int32(grid_size); DECLARE_int32(block_size); DECLARE_int32(mode); DECLARE_int32(source_node); DECLARE_int32(async_to_sync); DECLARE_int32(sync_to_async); const distance_t INF = UINT_MAX; namespace sssp_expr { const distance_t IDENTITY_ELEMENT = UINT_MAX; struct Algo { static const char *Name() { return "SSSP"; } }; __inline__ __device__ uint32_t warpReduce(uint32_t localSum) { localSum += __shfl_xor_sync(0xfffffff, localSum, 16); localSum += __shfl_xor_sync(0xfffffff, localSum, 8); localSum += __shfl_xor_sync(0xfffffff, localSum, 4); localSum += __shfl_xor_sync(0xfffffff, localSum, 2); localSum += __shfl_xor_sync(0xfffffff, localSum, 1); return localSum; } template<template<typename> class TDistanceDatum> __device__ void SSSPCheck__Single__(TDistanceDatum<distance_t> current_ranks, distance_t *block_sum_buffer, distance_t *rtn_sum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; int laneIdx = threadIdx.x % warpSize; int warpIdx = threadIdx.x / warpSize; const int SMEMDIM = blockDim.x / warpSize; __shared__ distance_t smem[32]; uint32_t work_size = current_ranks.size; distance_t local_sum = 0; for (uint32_t node = 0 + tid; node < work_size; node += nthreads) { distance_t dist = current_ranks[node]; if (dist != IDENTITY_ELEMENT) local_sum += dist; } local_sum = warpReduce(local_sum); if (laneIdx == 0) smem[warpIdx] = local_sum; __syncthreads(); local_sum = 
(threadIdx.x < SMEMDIM) ? smem[threadIdx.x] : 0; if (warpIdx == 0) local_sum = warpReduce(local_sum); if (threadIdx.x == 0) { block_sum_buffer[blockIdx.x] = local_sum; } if (tid == 0) { uint32_t sum = 0; for (int bid = 0; bid < gridDim.x; bid++) { sum += block_sum_buffer[bid]; } *rtn_sum = sum; } } // template< // template<typename> class WorkList, // typename TGraph, typename TWeightDatum, // template<typename> class TDistanceDatum, // template<typename> class TDistanceDeltaDatum> // __device__ void SSSPAsync( // const WorkList<index_t> &work_source, // WorkList<index_t> &work_immediate_target, // WorkList<index_t> &work_later_target, // const distance_t priority_threshold, // const TGraph &graph, // const TWeightDatum &edge_weights, // TDistanceDatum<distance_t> &node_distances, // TDistanceDeltaDatum<distance_t> &node_distances_delta) { // uint32_t tid = TID_1D; // uint32_t nthreads = TOTAL_THREADS_1D; // // // uint32_t work_size = work_source.count(); // // for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { // // index_t node = work_source.read(i); // // distance_t old_value = node_distances[node]; // distance_t old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); // distance_t new_value = min(old_value, old_delta); // // if (new_value != old_value) { // for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { // index_t dest = graph.edge_dest(edge); // distance_t weight = edge_weights.get_item(edge); // distance_t new_delta = old_delta + weight; // distance_t before = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); // // if (new_delta < before) { // if (new_delta < priority_threshold) // work_immediate_target.append_warp(dest); // else // work_later_target.append_warp(dest); // } // } // } // } // } template<template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum> __device__ void SSSPKernel__NF__( 
WorkTarget<index_t> work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, int delta, TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances) { uint32_t tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t distance = node_distances.get_item(node); for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { if (distance + weight <= delta) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } } } } template<template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ // __global__ void SSSPAsync( WorkTarget<index_t> work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, int priority_threshold, TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = 
edge_weights.get_item(edge); //assert(old_delta != IDENTITY_ELEMENT); //If update the dest success, expand the worklist if (old_delta + weight < atomicMin(node_distances_delta.get_item_ptr(dest), old_delta + weight)) { if (old_delta + weight <= priority_threshold) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } } } } } template< template<typename> class WorkList, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ // __global__ void SSSPAsyncCTA( const WorkList<index_t> work_source, WorkList<index_t> work_immediate_target, WorkList<index_t> work_later_target, const distance_t priority_threshold, const TGraph graph, const TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta = node_distances_delta[node];// atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&work_immediate_target, &work_later_target, &priority_threshold, &graph, &edge_weights, &node_distances_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t 
weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); if (new_delta < before_update) { if (new_delta < priority_threshold) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } }); } } template< typename WorkSource, template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> //__global__ __device__ void SSSPSync( const WorkSource work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, distance_t priority_threshold, index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) { old_delta = node_distances_delta[node];//atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); } else { old_delta = node_distances_last_delta[node];//atomicExch(node_distances_last_delta.get_item_ptr(node), IDENTITY_ELEMENT); } distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); else before_update = 
atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); if (new_delta < before_update) { if (new_delta < priority_threshold) work_immediate_target.append_warp(dest); else { work_later_target.append_warp(dest); } } } } } } //for later nodes, even though...delta > value, but as long as delta != INF, we stil have to send delta to the neighbors. template< typename WorkSource, template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> //__global__ __device__ void SSSPSyncCTA( const WorkSource work_source, WorkTarget<index_t> work_immediate_target, distance_t priority_threshold, index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) old_delta = node_distances_delta[node];// atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); else old_delta = node_distances_last_delta[node]; distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iteration, &work_immediate_target, &priority_threshold, &graph, &edge_weights, &node_distances_delta, 
&node_distances_last_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) { before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); } else { before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); } if (new_delta < before_update) { if (new_delta < priority_threshold) { work_immediate_target.append_warp(dest); } } }); } } template< typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ void SSSPSyncCTATopo( index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = graph.owned_nnodes(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop bool updated = false; for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = i; distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) { old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); } else { old_delta = atomicExch(node_distances_last_delta.get_item_ptr(node), IDENTITY_ELEMENT); } distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; updated = true; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iteration, &graph, 
&edge_weights, &node_distances_delta, &node_distances_last_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) { before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); } else { before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); } }); } } template<typename T> __device__ void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //try to use topologoy template<template<typename> class WorkList, typename TGraph, template<typename> class TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __global__ void SSSPControlHybrid__Single__(uint32_t async_to_sync, uint32_t sync_to_async, cub::GridBarrier grid_barrier, WorkList<index_t> work_source, WorkList<index_t> work_immediate_target, WorkList<index_t> work_later_target, const distance_t priority_threshold, const TGraph graph, const TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; WorkList<index_t> *in_wl = &work_source; WorkList<index_t> *out_immediate_wl = &work_immediate_target; WorkList<index_t> *out_later_wl = &work_later_target; distance_t curr_threshold = priority_threshold; distance_t last_distance_sum = 0; TDistanceDeltaDatum<distance_t> *available_delta = &node_distances_delta; //Async->Sync->Async //Async -> Sync, no limitation //Sync -> Async, iteration % 2 == 1 if (tid == 0) { printf("CALL SSSPControl%s__Single__ InitPrio:%d\n", "Hybrid", priority_threshold); } // uint32_t iteration = 0; int mode = 1;//1-> Async, 0->Sync int last_iteration = 0; while (in_wl->count() > 0) { while (in_wl->count() > 0) { if ((iteration < 
async_to_sync || iteration >= sync_to_async)) { // SSSPKernel__NF__(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // graph, // edge_weights, // node_distances_delta); SSSPAsyncCTA(*in_wl, *out_immediate_wl, *out_later_wl, curr_threshold, graph, edge_weights, node_distances, *available_delta); mode = 1; } else { // SSSPSyncCTATopo(iteration, // graph, // edge_weights, // node_distances, // node_distances_delta, // node_distances_last_delta); SSSPSyncCTA(*in_wl, *out_immediate_wl, curr_threshold, iteration, graph, edge_weights, node_distances, node_distances_delta, node_distances_last_delta); if (iteration % 2 == 0) { available_delta = &node_distances_last_delta; } mode = 0; } grid_barrier.Sync(); if (tid == 0) { printf("%s INPUT %d IMMEDIATE %d LATER %d\n", mode == 1 ? "Async" : "Sync", in_wl->count(), out_immediate_wl->count(), out_later_wl->count()); in_wl->reset(); } grid_barrier.Sync(); swap(in_wl, out_immediate_wl); iteration++; grid_barrier.Sync(); } swap(in_wl, out_later_wl); curr_threshold += priority_threshold; grid_barrier.Sync(); } if (tid == 0) { printf("Total iterations: %d\n", iteration); } // for (uint32_t i = 0 + tid; i < graph.nnodes; i += nthreads) { // assert(node_distances_delta[i] == IDENTITY_ELEMENT && // node_distances_last_delta[i] == IDENTITY_ELEMENT); // } } // template<bool Async, // template<typename> class WorkList, // typename TGraph, // template<typename> class TWeightDatum, // template<typename> class TDistanceDatum, // template<typename> class TDistanceDeltaDatum> // __global__ void SSSPControl__Single__(distance_t *block_sum_buffer, // cub::GridBarrier grid_barrier, // WorkList<index_t> work_source, // WorkList<index_t> work_immediate_target, // WorkList<index_t> work_later_target, // const distance_t priority_threshold, // const TGraph graph, // const TWeightDatum<distance_t> edge_weights, // TDistanceDatum<distance_t> node_distances, // TDistanceDeltaDatum<distance_t> node_distances_delta, // 
TDistanceDeltaDatum<distance_t> node_distances_last_delta) { // // uint32_t tid = TID_1D; // uint32_t nthreads = TOTAL_THREADS_1D; // WorkList<index_t> *in_wl = &work_source; // WorkList<index_t> *out_immediate_wl = &work_immediate_target; // WorkList<index_t> *out_later_wl = &work_later_target; // distance_t curr_threshold = priority_threshold; // distance_t last_distance_sum = 0; // // if (tid == 0) { // printf("CALL SSSPControl%s__Single__ InitPrio:%d\n", Async ? "Async" : "Sync", priority_threshold); // } //// // uint32_t iteration = 0; // while (in_wl->count() > 0) { // while (in_wl->count() > 0) { // if (Async) { // SSSPAsync(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // graph, // edge_weights, // node_distances, // node_distances_delta); // } else { // SSSPSyncCTA(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // iteration, // graph, // edge_weights, // node_distances, // node_distances_delta, // node_distances_last_delta); // } // grid_barrier.Sync(); // // if (tid == 0) { //// printf("INPUT %d IMMEDIATE %d LATER %d\n", in_wl->count(), out_immediate_wl->count(), //// out_later_wl->count()); // in_wl->reset(); // } // // WorkList<index_t> *tmp_wl = in_wl; // in_wl = out_immediate_wl; // out_immediate_wl = tmp_wl; // // iteration++; // grid_barrier.Sync(); // } // // WorkList<index_t> *tmp_wl = in_wl; // in_wl = out_later_wl; // out_later_wl = tmp_wl; // // curr_threshold += priority_threshold; // // //// distance_t distance_sum; //// SSSPCheck__Single__(node_distances, block_sum_buffer, &distance_sum); //// if (distance_sum == last_distance_sum) { //// printf("distance sum:%u\n", distance_sum); //// break; //// } //// last_distance_sum = distance_sum; // // grid_barrier.Sync(); // } // // if (tid == 0) { // printf("Total iterations: %d\n", iteration); // } // // //// for (uint32_t i = 0 + tid; i < graph.nnodes; i += nthreads) { //// assert(node_distances_delta[i] == IDENTITY_ELEMENT && //// 
node_distances_last_delta[i] == IDENTITY_ELEMENT); //// } // } template<template<typename> class DistanceDatum, template<typename> class DistanceDeltaDatum> __global__ void SSSPInit(index_t source, DistanceDatum<distance_t> distances, DistanceDeltaDatum<distance_t> delta_distances, DistanceDeltaDatum<distance_t> last_delta_distances, int nnodes) { int tid = GTID; if (tid < nnodes) { distances[tid] = IDENTITY_ELEMENT; last_delta_distances[tid] = IDENTITY_ELEMENT; delta_distances[tid] = (tid == source ? 0 : IDENTITY_ELEMENT); } } template< typename TGraph, template<typename> class TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> struct Problem { TGraph m_graph; TWeightDatum<distance_t> m_weights_datum; TDistanceDatum<distance_t> m_distances_datum; TDistanceDeltaDatum<distance_t> m_distances_delta_datum; TDistanceDeltaDatum<distance_t> m_distances_last_delta_datum; distance_t m_priority_threshold; int m_curr_threshold = 0; public: Problem(const TGraph &graph, const TWeightDatum<distance_t> &weights_datum, const TDistanceDatum<distance_t> &distances_datum, const TDistanceDeltaDatum<distance_t> &distances_delta_datum, const TDistanceDeltaDatum<distance_t> &distances_last_delta_datum, const distance_t priority_threshold) : m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum), m_distances_delta_datum(distances_delta_datum), m_distances_last_delta_datum(distances_last_delta_datum), m_priority_threshold(priority_threshold) { } void Init(groute::Queue<index_t> &in_wl, groute::Stream &stream) const { index_t source_node = min(max(0, FLAGS_source_node), m_graph.nnodes - 1); dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, m_distances_datum.size); Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit"); SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> > (source_node, m_distances_datum, m_distances_delta_datum, m_distances_last_delta_datum, m_distances_datum.size); 
in_wl.AppendItemAsync(stream.cuda_stream, source_node); // add the first item to the worklist } }; } bool SSSPExpr1() { typedef sssp_expr::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum> Problem; utils::traversal::Context<sssp_expr::Algo> context(1); context.configuration.verbose = FLAGS_verbose; context.configuration.trace = FLAGS_trace; groute::graphs::single::CSRGraphAllocator dev_graph_allocator(context.host_graph); context.SetDevice(0); groute::graphs::single::EdgeInputDatum<distance_t> edge_weights; groute::graphs::single::NodeOutputDatum<distance_t> node_distances; groute::graphs::single::NodeOutputDatum<distance_t> node_delta_distances; groute::graphs::single::NodeOutputDatum<distance_t> node_last_delta_distances; dev_graph_allocator.AllocateDatumObjects(edge_weights, node_distances, node_delta_distances, node_last_delta_distances); context.SyncDevice(0); size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor_local; groute::Stream stream = context.CreateStream(0); groute::Queue<index_t> wl1(max_work_size, 0, "input queue"); groute::Queue<index_t> wl2(max_work_size, 0, "output queue1"); groute::Queue<index_t> wl3(max_work_size, 0, "output queue2"); wl1.ResetAsync(stream.cuda_stream); wl2.ResetAsync(stream.cuda_stream); wl3.ResetAsync(stream.cuda_stream); stream.Sync(); Problem problem(dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject(), node_delta_distances.DeviceObject(), node_last_delta_distances.DeviceObject(), FLAGS_prio_delta); problem.Init(wl1, stream); stream.Sync(); int occupancy_per_MP = FLAGS_grid_size; // hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, // sssp_expr::SSSPControl__Single__<groute::dev::Queue, // groute::graphs::dev::CSRGraph, // groute::graphs::dev::GraphDatum, // groute::graphs::dev::GraphDatum, // groute::graphs::dev::GraphDatum>, // FLAGS_block_size, 0); 
cub::GridBarrierLifetime grid_barrier; grid_barrier.Setup(occupancy_per_MP); printf("grid size %d block size %d\n", occupancy_per_MP, FLAGS_block_size); Stopwatch sw(true); utils::SharedArray<distance_t> block_sum_buffer(FLAGS_grid_size); // sssp_expr::SSSPControl__Single__<true> // << < occupancy_per_MP, FLAGS_block_size, 0, stream.cuda_stream >> > // (block_sum_buffer.dev_ptr, // grid_barrier, // wl1.DeviceObject(), // wl2.DeviceObject(), // wl3.DeviceObject(), // FLAGS_prio_delta, // dev_graph_allocator.DeviceObject(), // edge_weights.DeviceObject(), // node_distances.DeviceObject(), // node_delta_distances.DeviceObject(), // node_last_delta_distances.DeviceObject()); sssp_expr::SSSPControlHybrid__Single__ << < occupancy_per_MP, FLAGS_block_size, 0, stream.cuda_stream >> > (FLAGS_async_to_sync, FLAGS_sync_to_async, grid_barrier, wl1.DeviceObject(), wl2.DeviceObject(), wl3.DeviceObject(), FLAGS_prio_delta, dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject(), node_delta_distances.DeviceObject(), node_last_delta_distances.DeviceObject()); stream.Sync(); // int iteration = 0; // groute::Queue<index_t> *in_wl = &wl1, *out_immediate_wl = &wl2, *out_later_wl = &wl3; // while (in_wl->GetCount(stream) > 0) { // sssp_expr::SSSPAsyncCTA << < FLAGS_grid_size, FLAGS_block_size, 0, stream.cuda_stream >> > ( // in_wl->DeviceObject(), // out_immediate_wl->DeviceObject(), // out_later_wl->DeviceObject(), // FLAGS_prio_delta, // dev_graph_allocator.DeviceObject(), // edge_weights.DeviceObject(), // node_distances.DeviceObject(), // node_delta_distances.DeviceObject()); // // printf("After iteration: %u input: %u immediate output: %u later output: %u\n", iteration++, // in_wl->GetCount(stream), out_immediate_wl->GetCount(stream), out_later_wl->GetCount(stream)); // // in_wl->ResetAsync(stream); // stream.Sync(); // // if (out_immediate_wl->GetCount(stream) > 0) // std::swap(out_immediate_wl, in_wl); // else // std::swap(out_later_wl, 
in_wl); // } sw.stop(); printf("sssp done:%f\n", sw.ms()); if (FLAGS_output.size() > 0) { dev_graph_allocator.GatherDatum(node_distances); SSSPOutput(FLAGS_output.data(), node_distances.GetHostData()); // dev_graph_allocator.GatherDatum(node_delta_distances); // SSSPOutput(FLAGS_output.data(), node_delta_distances.GetHostData()); } return true; }
0c79ff66993483fa9fd7a162193b3f00ebd14ee3.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
#include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <device_launch_parameters.h> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/cuda_utils.h> #include <utils/graphs/traversal.h> #include <utils/balancer.h> #include "sssp_common.h" #define GTID (blockIdx.x * blockDim.x + threadIdx.x) #define FILTER_THRESHOLD 0.0000000001 DECLARE_double(wl_alloc_factor_local); DECLARE_int32(source_node); DECLARE_int32(grid_size); DECLARE_int32(block_size); DECLARE_int32(mode); DECLARE_int32(source_node); DECLARE_int32(async_to_sync); DECLARE_int32(sync_to_async); const distance_t INF = UINT_MAX; namespace sssp_expr { const distance_t IDENTITY_ELEMENT = UINT_MAX; struct Algo { static const char *Name() { return "SSSP"; } }; __inline__ __device__ uint32_t warpReduce(uint32_t localSum) { localSum += __shfl_xor_sync(0xfffffff, localSum, 16); localSum += __shfl_xor_sync(0xfffffff, localSum, 8); localSum += __shfl_xor_sync(0xfffffff, localSum, 4); localSum += __shfl_xor_sync(0xfffffff, localSum, 2); localSum += __shfl_xor_sync(0xfffffff, localSum, 1); return localSum; } template<template<typename> class TDistanceDatum> __device__ void SSSPCheck__Single__(TDistanceDatum<distance_t> current_ranks, distance_t *block_sum_buffer, distance_t *rtn_sum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; int laneIdx = threadIdx.x % warpSize; int warpIdx = threadIdx.x / warpSize; const int SMEMDIM = blockDim.x / warpSize; __shared__ distance_t smem[32]; uint32_t work_size = current_ranks.size; distance_t local_sum = 0; for (uint32_t node = 0 + tid; node < work_size; node += nthreads) { distance_t dist = current_ranks[node]; if (dist != IDENTITY_ELEMENT) local_sum += dist; } local_sum = warpReduce(local_sum); if (laneIdx == 0) smem[warpIdx] = local_sum; __syncthreads(); local_sum = 
(threadIdx.x < SMEMDIM) ? smem[threadIdx.x] : 0; if (warpIdx == 0) local_sum = warpReduce(local_sum); if (threadIdx.x == 0) { block_sum_buffer[blockIdx.x] = local_sum; } if (tid == 0) { uint32_t sum = 0; for (int bid = 0; bid < gridDim.x; bid++) { sum += block_sum_buffer[bid]; } *rtn_sum = sum; } } // template< // template<typename> class WorkList, // typename TGraph, typename TWeightDatum, // template<typename> class TDistanceDatum, // template<typename> class TDistanceDeltaDatum> // __device__ void SSSPAsync( // const WorkList<index_t> &work_source, // WorkList<index_t> &work_immediate_target, // WorkList<index_t> &work_later_target, // const distance_t priority_threshold, // const TGraph &graph, // const TWeightDatum &edge_weights, // TDistanceDatum<distance_t> &node_distances, // TDistanceDeltaDatum<distance_t> &node_distances_delta) { // uint32_t tid = TID_1D; // uint32_t nthreads = TOTAL_THREADS_1D; // // // uint32_t work_size = work_source.count(); // // for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { // // index_t node = work_source.read(i); // // distance_t old_value = node_distances[node]; // distance_t old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); // distance_t new_value = min(old_value, old_delta); // // if (new_value != old_value) { // for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { // index_t dest = graph.edge_dest(edge); // distance_t weight = edge_weights.get_item(edge); // distance_t new_delta = old_delta + weight; // distance_t before = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); // // if (new_delta < before) { // if (new_delta < priority_threshold) // work_immediate_target.append_warp(dest); // else // work_later_target.append_warp(dest); // } // } // } // } // } template<template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum> __device__ void SSSPKernel__NF__( 
WorkTarget<index_t> work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, int delta, TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances) { uint32_t tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t distance = node_distances.get_item(node); for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); if (distance + weight < atomicMin(node_distances.get_item_ptr(dest), distance + weight)) { if (distance + weight <= delta) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } } } } template<template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ // __global__ void SSSPAsync( WorkTarget<index_t> work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, int priority_threshold, TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = 
edge_weights.get_item(edge); //assert(old_delta != IDENTITY_ELEMENT); //If update the dest success, expand the worklist if (old_delta + weight < atomicMin(node_distances_delta.get_item_ptr(dest), old_delta + weight)) { if (old_delta + weight <= priority_threshold) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } } } } } template< template<typename> class WorkList, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ // __global__ void SSSPAsyncCTA( const WorkList<index_t> work_source, WorkList<index_t> work_immediate_target, WorkList<index_t> work_later_target, const distance_t priority_threshold, const TGraph graph, const TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta = node_distances_delta[node];// atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&work_immediate_target, &work_later_target, &priority_threshold, &graph, &edge_weights, &node_distances_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t 
weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); if (new_delta < before_update) { if (new_delta < priority_threshold) { work_immediate_target.append_warp(dest); } else { work_later_target.append_warp(dest); } } }); } } template< typename WorkSource, template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> //__global__ __device__ void SSSPSync( const WorkSource work_source, WorkTarget<index_t> work_immediate_target, WorkTarget<index_t> work_later_target, distance_t priority_threshold, index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) { old_delta = node_distances_delta[node];//atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); } else { old_delta = node_distances_last_delta[node];//atomicExch(node_distances_last_delta.get_item_ptr(node), IDENTITY_ELEMENT); } distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); else before_update = 
atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); if (new_delta < before_update) { if (new_delta < priority_threshold) work_immediate_target.append_warp(dest); else { work_later_target.append_warp(dest); } } } } } } //for later nodes, even though...delta > value, but as long as delta != INF, we stil have to send delta to the neighbors. template< typename WorkSource, template<typename> class WorkTarget, typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> //__global__ __device__ void SSSPSyncCTA( const WorkSource work_source, WorkTarget<index_t> work_immediate_target, distance_t priority_threshold, index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.count(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.read(i); distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) old_delta = node_distances_delta[node];// atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); else old_delta = node_distances_last_delta[node]; distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iteration, &work_immediate_target, &priority_threshold, &graph, &edge_weights, &node_distances_delta, 
&node_distances_last_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) { before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); } else { before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); } if (new_delta < before_update) { if (new_delta < priority_threshold) { work_immediate_target.append_warp(dest); } } }); } } template< typename TGraph, typename TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __device__ void SSSPSyncCTATopo( index_t iteration, const TGraph graph, TWeightDatum edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = graph.owned_nnodes(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop bool updated = false; for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = i; distance_t old_value = node_distances[node]; distance_t old_delta; if (iteration % 2 == 0) { old_delta = atomicExch(node_distances_delta.get_item_ptr(node), IDENTITY_ELEMENT); } else { old_delta = atomicExch(node_distances_last_delta.get_item_ptr(node), IDENTITY_ELEMENT); } distance_t new_value = min(old_value, old_delta); if (new_value < old_value) { node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; updated = true; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iteration, &graph, 
&edge_weights, &node_distances_delta, &node_distances_last_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; distance_t before_update; if (iteration % 2 == 0) { before_update = atomicMin(node_distances_last_delta.get_item_ptr(dest), new_delta); } else { before_update = atomicMin(node_distances_delta.get_item_ptr(dest), new_delta); } }); } } template<typename T> __device__ void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //try to use topologoy template<template<typename> class WorkList, typename TGraph, template<typename> class TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> __global__ void SSSPControlHybrid__Single__(uint32_t async_to_sync, uint32_t sync_to_async, cub::GridBarrier grid_barrier, WorkList<index_t> work_source, WorkList<index_t> work_immediate_target, WorkList<index_t> work_later_target, const distance_t priority_threshold, const TGraph graph, const TWeightDatum<distance_t> edge_weights, TDistanceDatum<distance_t> node_distances, TDistanceDeltaDatum<distance_t> node_distances_delta, TDistanceDeltaDatum<distance_t> node_distances_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; WorkList<index_t> *in_wl = &work_source; WorkList<index_t> *out_immediate_wl = &work_immediate_target; WorkList<index_t> *out_later_wl = &work_later_target; distance_t curr_threshold = priority_threshold; distance_t last_distance_sum = 0; TDistanceDeltaDatum<distance_t> *available_delta = &node_distances_delta; //Async->Sync->Async //Async -> Sync, no limitation //Sync -> Async, iteration % 2 == 1 if (tid == 0) { printf("CALL SSSPControl%s__Single__ InitPrio:%d\n", "Hybrid", priority_threshold); } // uint32_t iteration = 0; int mode = 1;//1-> Async, 0->Sync int last_iteration = 0; while (in_wl->count() > 0) { while (in_wl->count() > 0) { if ((iteration < 
async_to_sync || iteration >= sync_to_async)) { // SSSPKernel__NF__(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // graph, // edge_weights, // node_distances_delta); SSSPAsyncCTA(*in_wl, *out_immediate_wl, *out_later_wl, curr_threshold, graph, edge_weights, node_distances, *available_delta); mode = 1; } else { // SSSPSyncCTATopo(iteration, // graph, // edge_weights, // node_distances, // node_distances_delta, // node_distances_last_delta); SSSPSyncCTA(*in_wl, *out_immediate_wl, curr_threshold, iteration, graph, edge_weights, node_distances, node_distances_delta, node_distances_last_delta); if (iteration % 2 == 0) { available_delta = &node_distances_last_delta; } mode = 0; } grid_barrier.Sync(); if (tid == 0) { printf("%s INPUT %d IMMEDIATE %d LATER %d\n", mode == 1 ? "Async" : "Sync", in_wl->count(), out_immediate_wl->count(), out_later_wl->count()); in_wl->reset(); } grid_barrier.Sync(); swap(in_wl, out_immediate_wl); iteration++; grid_barrier.Sync(); } swap(in_wl, out_later_wl); curr_threshold += priority_threshold; grid_barrier.Sync(); } if (tid == 0) { printf("Total iterations: %d\n", iteration); } // for (uint32_t i = 0 + tid; i < graph.nnodes; i += nthreads) { // assert(node_distances_delta[i] == IDENTITY_ELEMENT && // node_distances_last_delta[i] == IDENTITY_ELEMENT); // } } // template<bool Async, // template<typename> class WorkList, // typename TGraph, // template<typename> class TWeightDatum, // template<typename> class TDistanceDatum, // template<typename> class TDistanceDeltaDatum> // __global__ void SSSPControl__Single__(distance_t *block_sum_buffer, // cub::GridBarrier grid_barrier, // WorkList<index_t> work_source, // WorkList<index_t> work_immediate_target, // WorkList<index_t> work_later_target, // const distance_t priority_threshold, // const TGraph graph, // const TWeightDatum<distance_t> edge_weights, // TDistanceDatum<distance_t> node_distances, // TDistanceDeltaDatum<distance_t> node_distances_delta, // 
TDistanceDeltaDatum<distance_t> node_distances_last_delta) { // // uint32_t tid = TID_1D; // uint32_t nthreads = TOTAL_THREADS_1D; // WorkList<index_t> *in_wl = &work_source; // WorkList<index_t> *out_immediate_wl = &work_immediate_target; // WorkList<index_t> *out_later_wl = &work_later_target; // distance_t curr_threshold = priority_threshold; // distance_t last_distance_sum = 0; // // if (tid == 0) { // printf("CALL SSSPControl%s__Single__ InitPrio:%d\n", Async ? "Async" : "Sync", priority_threshold); // } //// // uint32_t iteration = 0; // while (in_wl->count() > 0) { // while (in_wl->count() > 0) { // if (Async) { // SSSPAsync(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // graph, // edge_weights, // node_distances, // node_distances_delta); // } else { // SSSPSyncCTA(*in_wl, // *out_immediate_wl, // *out_later_wl, // curr_threshold, // iteration, // graph, // edge_weights, // node_distances, // node_distances_delta, // node_distances_last_delta); // } // grid_barrier.Sync(); // // if (tid == 0) { //// printf("INPUT %d IMMEDIATE %d LATER %d\n", in_wl->count(), out_immediate_wl->count(), //// out_later_wl->count()); // in_wl->reset(); // } // // WorkList<index_t> *tmp_wl = in_wl; // in_wl = out_immediate_wl; // out_immediate_wl = tmp_wl; // // iteration++; // grid_barrier.Sync(); // } // // WorkList<index_t> *tmp_wl = in_wl; // in_wl = out_later_wl; // out_later_wl = tmp_wl; // // curr_threshold += priority_threshold; // // //// distance_t distance_sum; //// SSSPCheck__Single__(node_distances, block_sum_buffer, &distance_sum); //// if (distance_sum == last_distance_sum) { //// printf("distance sum:%u\n", distance_sum); //// break; //// } //// last_distance_sum = distance_sum; // // grid_barrier.Sync(); // } // // if (tid == 0) { // printf("Total iterations: %d\n", iteration); // } // // //// for (uint32_t i = 0 + tid; i < graph.nnodes; i += nthreads) { //// assert(node_distances_delta[i] == IDENTITY_ELEMENT && //// 
node_distances_last_delta[i] == IDENTITY_ELEMENT); //// } // } template<template<typename> class DistanceDatum, template<typename> class DistanceDeltaDatum> __global__ void SSSPInit(index_t source, DistanceDatum<distance_t> distances, DistanceDeltaDatum<distance_t> delta_distances, DistanceDeltaDatum<distance_t> last_delta_distances, int nnodes) { int tid = GTID; if (tid < nnodes) { distances[tid] = IDENTITY_ELEMENT; last_delta_distances[tid] = IDENTITY_ELEMENT; delta_distances[tid] = (tid == source ? 0 : IDENTITY_ELEMENT); } } template< typename TGraph, template<typename> class TWeightDatum, template<typename> class TDistanceDatum, template<typename> class TDistanceDeltaDatum> struct Problem { TGraph m_graph; TWeightDatum<distance_t> m_weights_datum; TDistanceDatum<distance_t> m_distances_datum; TDistanceDeltaDatum<distance_t> m_distances_delta_datum; TDistanceDeltaDatum<distance_t> m_distances_last_delta_datum; distance_t m_priority_threshold; int m_curr_threshold = 0; public: Problem(const TGraph &graph, const TWeightDatum<distance_t> &weights_datum, const TDistanceDatum<distance_t> &distances_datum, const TDistanceDeltaDatum<distance_t> &distances_delta_datum, const TDistanceDeltaDatum<distance_t> &distances_last_delta_datum, const distance_t priority_threshold) : m_graph(graph), m_weights_datum(weights_datum), m_distances_datum(distances_datum), m_distances_delta_datum(distances_delta_datum), m_distances_last_delta_datum(distances_last_delta_datum), m_priority_threshold(priority_threshold) { } void Init(groute::Queue<index_t> &in_wl, groute::Stream &stream) const { index_t source_node = min(max(0, FLAGS_source_node), m_graph.nnodes - 1); dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, m_distances_datum.size); Marker::MarkWorkitems(m_distances_datum.size, "SSSPInit"); SSSPInit << < grid_dims, block_dims, 0, stream.cuda_stream >> > (source_node, m_distances_datum, m_distances_delta_datum, m_distances_last_delta_datum, m_distances_datum.size); 
in_wl.AppendItemAsync(stream.cuda_stream, source_node); // add the first item to the worklist } }; } bool SSSPExpr1() { typedef sssp_expr::Problem<groute::graphs::dev::CSRGraph, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum, groute::graphs::dev::GraphDatum> Problem; utils::traversal::Context<sssp_expr::Algo> context(1); context.configuration.verbose = FLAGS_verbose; context.configuration.trace = FLAGS_trace; groute::graphs::single::CSRGraphAllocator dev_graph_allocator(context.host_graph); context.SetDevice(0); groute::graphs::single::EdgeInputDatum<distance_t> edge_weights; groute::graphs::single::NodeOutputDatum<distance_t> node_distances; groute::graphs::single::NodeOutputDatum<distance_t> node_delta_distances; groute::graphs::single::NodeOutputDatum<distance_t> node_last_delta_distances; dev_graph_allocator.AllocateDatumObjects(edge_weights, node_distances, node_delta_distances, node_last_delta_distances); context.SyncDevice(0); size_t max_work_size = context.host_graph.nedges * FLAGS_wl_alloc_factor_local; groute::Stream stream = context.CreateStream(0); groute::Queue<index_t> wl1(max_work_size, 0, "input queue"); groute::Queue<index_t> wl2(max_work_size, 0, "output queue1"); groute::Queue<index_t> wl3(max_work_size, 0, "output queue2"); wl1.ResetAsync(stream.cuda_stream); wl2.ResetAsync(stream.cuda_stream); wl3.ResetAsync(stream.cuda_stream); stream.Sync(); Problem problem(dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject(), node_delta_distances.DeviceObject(), node_last_delta_distances.DeviceObject(), FLAGS_prio_delta); problem.Init(wl1, stream); stream.Sync(); int occupancy_per_MP = FLAGS_grid_size; // cudaOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy_per_MP, // sssp_expr::SSSPControl__Single__<groute::dev::Queue, // groute::graphs::dev::CSRGraph, // groute::graphs::dev::GraphDatum, // groute::graphs::dev::GraphDatum, // groute::graphs::dev::GraphDatum>, // FLAGS_block_size, 0); 
cub::GridBarrierLifetime grid_barrier; grid_barrier.Setup(occupancy_per_MP); printf("grid size %d block size %d\n", occupancy_per_MP, FLAGS_block_size); Stopwatch sw(true); utils::SharedArray<distance_t> block_sum_buffer(FLAGS_grid_size); // sssp_expr::SSSPControl__Single__<true> // << < occupancy_per_MP, FLAGS_block_size, 0, stream.cuda_stream >> > // (block_sum_buffer.dev_ptr, // grid_barrier, // wl1.DeviceObject(), // wl2.DeviceObject(), // wl3.DeviceObject(), // FLAGS_prio_delta, // dev_graph_allocator.DeviceObject(), // edge_weights.DeviceObject(), // node_distances.DeviceObject(), // node_delta_distances.DeviceObject(), // node_last_delta_distances.DeviceObject()); sssp_expr::SSSPControlHybrid__Single__ << < occupancy_per_MP, FLAGS_block_size, 0, stream.cuda_stream >> > (FLAGS_async_to_sync, FLAGS_sync_to_async, grid_barrier, wl1.DeviceObject(), wl2.DeviceObject(), wl3.DeviceObject(), FLAGS_prio_delta, dev_graph_allocator.DeviceObject(), edge_weights.DeviceObject(), node_distances.DeviceObject(), node_delta_distances.DeviceObject(), node_last_delta_distances.DeviceObject()); stream.Sync(); // int iteration = 0; // groute::Queue<index_t> *in_wl = &wl1, *out_immediate_wl = &wl2, *out_later_wl = &wl3; // while (in_wl->GetCount(stream) > 0) { // sssp_expr::SSSPAsyncCTA << < FLAGS_grid_size, FLAGS_block_size, 0, stream.cuda_stream >> > ( // in_wl->DeviceObject(), // out_immediate_wl->DeviceObject(), // out_later_wl->DeviceObject(), // FLAGS_prio_delta, // dev_graph_allocator.DeviceObject(), // edge_weights.DeviceObject(), // node_distances.DeviceObject(), // node_delta_distances.DeviceObject()); // // printf("After iteration: %u input: %u immediate output: %u later output: %u\n", iteration++, // in_wl->GetCount(stream), out_immediate_wl->GetCount(stream), out_later_wl->GetCount(stream)); // // in_wl->ResetAsync(stream); // stream.Sync(); // // if (out_immediate_wl->GetCount(stream) > 0) // std::swap(out_immediate_wl, in_wl); // else // std::swap(out_later_wl, 
in_wl); // } sw.stop(); printf("sssp done:%f\n", sw.ms()); if (FLAGS_output.size() > 0) { dev_graph_allocator.GatherDatum(node_distances); SSSPOutput(FLAGS_output.data(), node_distances.GetHostData()); // dev_graph_allocator.GatherDatum(node_delta_distances); // SSSPOutput(FLAGS_output.data(), node_delta_distances.GetHostData()); } return true; }
f30801a58829f16880b32b8a98fef4805bb82147.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include <hpc/cuda/check.h>
#include <hpc/cuda/dot.hpp>

#ifndef N
#define N 9192
#endif

#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 256
#endif

// Fix: NUM_BLOCKS used to live inside the THREADS_PER_BLOCK guard above, so
// pre-defining THREADS_PER_BLOCK on the compiler command line (the whole
// point of the guard) left NUM_BLOCKS undefined and broke the build.
// Guard it independently so either macro can be overridden.
#ifndef NUM_BLOCKS
#define NUM_BLOCKS ((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK)
#endif

// Reference (host) implementation of a strided dot product:
//   res = sum_{i=0}^{n-1} x[i*incX] * y[i*incY]
// Used below to check the GPU result.
template<typename Index, typename TX, typename TY>
TX dot(Index n, const TX* x, Index incX, TY* y, Index incY) {
   TX res = 0;
   for (Index index = 0; index < n; ++index) {
      res += x[index * incX] * y[index * incY];
   }
   return res;
}

// Computes dot(a, b) on the GPU (one partial sum per block, aggregated on
// the host) and prints the difference to the host reference result.
int main() {
   using namespace hpc::cuda;

   // Host inputs: a[i] = i, b[i] = i^2.
   double a[N];
   double b[N];
   for (std::size_t i = 0; i < N; ++i) {
      a[i] = i;
      b[i] = i * i;
   }

   /* transfer vectors to GPU memory */
   double* cuda_a;
   CHECK_CUDA(hipMalloc, (void**)&cuda_a, N * sizeof(double));
   CHECK_CUDA(hipMemcpy, cuda_a, a, N * sizeof(double),
      hipMemcpyHostToDevice);

   double* cuda_b;
   CHECK_CUDA(hipMalloc, (void**)&cuda_b, N * sizeof(double));
   CHECK_CUDA(hipMemcpy, cuda_b, b, N * sizeof(double),
      hipMemcpyHostToDevice);

   /* one partial sum per block, aggregated on the host below */
   double* cuda_sums;
   CHECK_CUDA(hipMalloc, (void**)&cuda_sums, NUM_BLOCKS * sizeof(double));

   /* execute kernel function on GPU (hpc::cuda::dot from <hpc/cuda/dot.hpp>) */
   hipLaunchKernelGGL((dot), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
      N, cuda_a, 1, cuda_b, 1, cuda_sums);

   /* transfer result vector from GPU to host memory;
      the blocking hipMemcpy also synchronizes with the kernel launch */
   double sums[NUM_BLOCKS];
   CHECK_CUDA(hipMemcpy, sums, cuda_sums, NUM_BLOCKS * sizeof(double),
      hipMemcpyDeviceToHost);

   /* free space allocated at GPU memory */
   CHECK_CUDA(hipFree, cuda_a);
   CHECK_CUDA(hipFree, cuda_b);
   CHECK_CUDA(hipFree, cuda_sums);

   /* aggregate block sums */
   double sum = 0;
   for (std::size_t i = 0; i < NUM_BLOCKS; ++i) {
      sum += sums[i];
   }

   /* print difference to local result */
   double local_sum = dot(N, a, 1, b, 1);
   std::printf("diff: %12.4lg\n", std::abs(sum - local_sum));
}
f30801a58829f16880b32b8a98fef4805bb82147.cu
#include <cstdio>
#include <cmath>
#include <hpc/cuda/check.h>
#include <hpc/cuda/dot.hpp>

#ifndef N
#define N 9192
#endif

#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 256
#endif

// Fix: NUM_BLOCKS used to live inside the THREADS_PER_BLOCK guard above, so
// pre-defining THREADS_PER_BLOCK on the compiler command line (the whole
// point of the guard) left NUM_BLOCKS undefined and broke the build.
// Guard it independently so either macro can be overridden.
#ifndef NUM_BLOCKS
#define NUM_BLOCKS ((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK)
#endif

// Reference (host) implementation of a strided dot product:
//   res = sum_{i=0}^{n-1} x[i*incX] * y[i*incY]
// Used below to check the GPU result.
template<typename Index, typename TX, typename TY>
TX dot(Index n, const TX* x, Index incX, TY* y, Index incY) {
   TX res = 0;
   for (Index index = 0; index < n; ++index) {
      res += x[index * incX] * y[index * incY];
   }
   return res;
}

// Computes dot(a, b) on the GPU (one partial sum per block, aggregated on
// the host) and prints the difference to the host reference result.
int main() {
   using namespace hpc::cuda;

   // Host inputs: a[i] = i, b[i] = i^2.
   double a[N];
   double b[N];
   for (std::size_t i = 0; i < N; ++i) {
      a[i] = i;
      b[i] = i * i;
   }

   /* transfer vectors to GPU memory */
   double* cuda_a;
   CHECK_CUDA(cudaMalloc, (void**)&cuda_a, N * sizeof(double));
   CHECK_CUDA(cudaMemcpy, cuda_a, a, N * sizeof(double),
      cudaMemcpyHostToDevice);

   double* cuda_b;
   CHECK_CUDA(cudaMalloc, (void**)&cuda_b, N * sizeof(double));
   CHECK_CUDA(cudaMemcpy, cuda_b, b, N * sizeof(double),
      cudaMemcpyHostToDevice);

   /* one partial sum per block, aggregated on the host below */
   double* cuda_sums;
   CHECK_CUDA(cudaMalloc, (void**)&cuda_sums, NUM_BLOCKS * sizeof(double));

   /* execute kernel function on GPU (hpc::cuda::dot from <hpc/cuda/dot.hpp>) */
   dot<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(N, cuda_a, 1, cuda_b, 1, cuda_sums);

   /* transfer result vector from GPU to host memory;
      the blocking cudaMemcpy also synchronizes with the kernel launch */
   double sums[NUM_BLOCKS];
   CHECK_CUDA(cudaMemcpy, sums, cuda_sums, NUM_BLOCKS * sizeof(double),
      cudaMemcpyDeviceToHost);

   /* free space allocated at GPU memory */
   CHECK_CUDA(cudaFree, cuda_a);
   CHECK_CUDA(cudaFree, cuda_b);
   CHECK_CUDA(cudaFree, cuda_sums);

   /* aggregate block sums */
   double sum = 0;
   for (std::size_t i = 0; i < NUM_BLOCKS; ++i) {
      sum += sums[i];
   }

   /* print difference to local result */
   double local_sum = dot(N, a, 1, b, 1);
   std::printf("diff: %12.4lg\n", std::abs(sum - local_sum));
}
19b4d9ea2a00a76dec945a276baec0aa2da81770.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <stdexcept> #include "gtest.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" // stack_storage.hpp template <typename T> struct stack_impl { unsigned capacity; unsigned size; T* data; }; template <typename T> std::ostream& operator<<(std::ostream& o, stack_impl<T>& s) { return o << "<stack_impl: " << s.capacity << ", " << s.size << ", " << s.data << ">"; } // gpu_stack.hpp template <typename T> __device__ void push(stack_impl<T>* s, T v) { /// Atomically increment the stores counter. The atomicAdd returns // the value of stores before the increment, which is the location // at which this thread can store value. unsigned position = atomicAdd(&(s->size), 1u); // It is possible that stores>capacity. In this case, only capacity // entries are stored, and additional values are lost. The stores // contains the total number of attempts to push. if (position<s->capacity) { s->data[position] = v; } // Note: there are no guards against s.stores overflowing: in which // case the stores counter would start again from 0, and values would // be overwritten from the front of the stack. 
} namespace kernels { template <typename T> __global__ void push_back(stack_impl<T>* s, T value) { push(s, value); } struct all_ftor { __host__ __device__ bool operator() (int i) { return true; } }; struct even_ftor { __host__ __device__ bool operator() (int i) { return (i%2)==0; } }; struct odd_ftor { __host__ __device__ bool operator() (int i) { return i%2; } }; } template <typename T> T* gpu_malloc(size_t n) { T* tmp; auto status = hipMalloc(&tmp, n*sizeof(T)); if (status != 0) { throw std::runtime_error(hipGetErrorName(status)); } return tmp; } template <typename T> void h2d_mem_copy(T* destination, T* source, size_t size) { auto status = hipMemcpy(destination, source, size, hipMemcpyHostToDevice); if (status != 0) { throw std::runtime_error(hipGetErrorName(status)); } } template <typename T> void d2h_mem_copy(T* destination, T* source, size_t size) { auto status = hipMemcpy(destination, source, size, hipMemcpyDeviceToHost); if (status != 0) { throw std::runtime_error(hipGetErrorName(status)); } } // host_stack.hpp template <typename T> class stack { stack_impl<T>* impl_; unsigned capacity_; public: stack(unsigned c) { capacity_ = c; // create a stack_impl on the host and complete stack_impl<T> impl; impl.capacity = capacity_; impl.size = 0; impl.data = gpu_malloc<T>(capacity_); // copy the stack_impl to device memory impl_ impl_ = gpu_malloc<stack_impl<T> >(1); h2d_mem_copy<stack_impl<T> >(impl_, &impl, sizeof(impl)); } std::vector<T> get_and_clear() { // get a copy of the implementation stack_impl<T> impl; d2h_mem_copy<stack_impl<T> >(&impl, impl_, sizeof(stack_impl<T>)); if (impl.size==0u) { return {}; } // copy the data to host std::vector<T> buf(impl.size); auto bytes = (impl.size)*sizeof(T); d2h_mem_copy(buf.data(), impl.data, bytes); // reset the implementation size to zero impl.size = 0; h2d_mem_copy(impl_, &impl, sizeof(impl)); return buf; } stack_impl<T>* get_impl() { return impl_; } }; TEST(stack, construction) { using T = int; unsigned capacity = 
10; // 1. Create stack on host (and on device) of capacity 10 stack<T> myStack(capacity); // 2. Push back a data point auto impl_p = myStack.get_impl(); T value = 42; hipLaunchKernelGGL(( kernels::push_back), dim3(1),dim3(1), 0, 0, impl_p, value); // 3. Get data back and confirm size/data auto myData = myStack.get_and_clear(); EXPECT_EQ(1u, myData.size()); EXPECT_EQ(value, myData[0]); } int main () { ::testing::InitGoogleTest( &argc, argv ); return RUN_ALL_TESTS(); }
19b4d9ea2a00a76dec945a276baec0aa2da81770.cu
#include <iostream> #include <vector> #include <stdexcept> #include "gtest.h" #include "cuda.h" #include "cuda_runtime_api.h" // stack_storage.hpp template <typename T> struct stack_impl { unsigned capacity; unsigned size; T* data; }; template <typename T> std::ostream& operator<<(std::ostream& o, stack_impl<T>& s) { return o << "<stack_impl: " << s.capacity << ", " << s.size << ", " << s.data << ">"; } // gpu_stack.hpp template <typename T> __device__ void push(stack_impl<T>* s, T v) { /// Atomically increment the stores counter. The atomicAdd returns // the value of stores before the increment, which is the location // at which this thread can store value. unsigned position = atomicAdd(&(s->size), 1u); // It is possible that stores>capacity. In this case, only capacity // entries are stored, and additional values are lost. The stores // contains the total number of attempts to push. if (position<s->capacity) { s->data[position] = v; } // Note: there are no guards against s.stores overflowing: in which // case the stores counter would start again from 0, and values would // be overwritten from the front of the stack. 
} namespace kernels { template <typename T> __global__ void push_back(stack_impl<T>* s, T value) { push(s, value); } struct all_ftor { __host__ __device__ bool operator() (int i) { return true; } }; struct even_ftor { __host__ __device__ bool operator() (int i) { return (i%2)==0; } }; struct odd_ftor { __host__ __device__ bool operator() (int i) { return i%2; } }; } template <typename T> T* gpu_malloc(size_t n) { T* tmp; auto status = cudaMalloc(&tmp, n*sizeof(T)); if (status != 0) { throw std::runtime_error(cudaGetErrorName(status)); } return tmp; } template <typename T> void h2d_mem_copy(T* destination, T* source, size_t size) { auto status = cudaMemcpy(destination, source, size, cudaMemcpyHostToDevice); if (status != 0) { throw std::runtime_error(cudaGetErrorName(status)); } } template <typename T> void d2h_mem_copy(T* destination, T* source, size_t size) { auto status = cudaMemcpy(destination, source, size, cudaMemcpyDeviceToHost); if (status != 0) { throw std::runtime_error(cudaGetErrorName(status)); } } // host_stack.hpp template <typename T> class stack { stack_impl<T>* impl_; unsigned capacity_; public: stack(unsigned c) { capacity_ = c; // create a stack_impl on the host and complete stack_impl<T> impl; impl.capacity = capacity_; impl.size = 0; impl.data = gpu_malloc<T>(capacity_); // copy the stack_impl to device memory impl_ impl_ = gpu_malloc<stack_impl<T> >(1); h2d_mem_copy<stack_impl<T> >(impl_, &impl, sizeof(impl)); } std::vector<T> get_and_clear() { // get a copy of the implementation stack_impl<T> impl; d2h_mem_copy<stack_impl<T> >(&impl, impl_, sizeof(stack_impl<T>)); if (impl.size==0u) { return {}; } // copy the data to host std::vector<T> buf(impl.size); auto bytes = (impl.size)*sizeof(T); d2h_mem_copy(buf.data(), impl.data, bytes); // reset the implementation size to zero impl.size = 0; h2d_mem_copy(impl_, &impl, sizeof(impl)); return buf; } stack_impl<T>* get_impl() { return impl_; } }; TEST(stack, construction) { using T = int; unsigned 
capacity = 10; // 1. Create stack on host (and on device) of capacity 10 stack<T> myStack(capacity); // 2. Push back a data point auto impl_p = myStack.get_impl(); T value = 42; kernels::push_back<<<1,1>>>(impl_p, value); // 3. Get data back and confirm size/data auto myData = myStack.get_and_clear(); EXPECT_EQ(1u, myData.size()); EXPECT_EQ(value, myData[0]); } int main () { ::testing::InitGoogleTest( &argc, argv ); return RUN_ALL_TESTS(); }
02da84a40d0f8058a3e23ef5948d8e415862f9b1.hip
// !!! This is a file automatically generated by hipify!!! /*! \file Sort.cu \author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010 \brief The source file for the C interface to CUDA sorting routines. */ #ifndef SORT_CU_INCLUDED #define SORT_CU_INCLUDED // Redfox Includes #include <redfox/nvcc/interface/RelationalAlgebraKernel.h> #include <redfox/ra/interface/Sort.h> #include <redfox/ra/interface/Tuple.h> // Thrust Includes #include <thrust/device_ptr.h> #include <thrust/sort.h> // Hydrazine Includes //#include <hydrazine/interface/debug.h> #include <stdio.h> namespace redfox { void check(hipError_t status) { if(status != hipSuccess) { std::cerr << hipGetErrorString(status) << "\n"; std::abort(); } } struct compare_sort_string { __host__ __device__ bool operator()(unsigned long long int i, unsigned long long int j) { char *string1 = (char *)i; char *string2 = (char *)j; int ii = 0; while(string1[ii] != '\0' && string2[ii] != '\0') { if(string1[ii] != string2[ii]) return (string1[ii] < string2[ii]); ii++; } if(string1[ii] == '\0' && string2[ii] != '\0') return true; else return false; } }; struct compare_sort_gpu128 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<2> i, ra::tuple::PackedNBytes<2> j) { if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu256 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<4> i, ra::tuple::PackedNBytes<4> j) { if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu512 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<8> i, ra::tuple::PackedNBytes<8> j) { if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); 
if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu1024 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<16> i, ra::tuple::PackedNBytes<16> j) { if (i.a[15] != j.a[15]) return (i.a[15] < j.a[15]); if (i.a[14] != j.a[14]) return (i.a[14] < j.a[14]); if (i.a[13] != j.a[13]) return (i.a[13] < j.a[13]); if (i.a[12] != j.a[12]) return (i.a[12] < j.a[12]); if (i.a[11] != j.a[11]) return (i.a[11] < j.a[11]); if (i.a[10] != j.a[10]) return (i.a[10] < j.a[10]); if (i.a[9] != j.a[9]) return (i.a[9] < j.a[9]); if (i.a[8] != j.a[8]) return (i.a[8] < j.a[8]); if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[2] < j.a[2]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; void sort_string(void* begin, void* end) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); typedef thrust::device_ptr<long long unsigned int> ptr; thrust::sort( ptr((long long unsigned int*)begin), ptr((long long unsigned int*)end), compare_sort_string()); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %f\n", exe_time); } void sort(void* begin, void* end, unsigned long long int type) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float exe_time = 0.0f; hipEventRecord(start,0); switch(type) { case nvcc::RelationalAlgebraKernel::I8: { thrust::sort( thrust::device_ptr<unsigned char>((unsigned char*)begin), thrust::device_ptr<unsigned char>((unsigned char*)end)); break; } case nvcc::RelationalAlgebraKernel::I16: { thrust::sort( thrust::device_ptr<unsigned 
short>((unsigned short*)begin), thrust::device_ptr<unsigned short>((unsigned short*)end)); break; } case nvcc::RelationalAlgebraKernel::I32: { thrust::sort( thrust::device_ptr<unsigned int>((unsigned int*)begin), thrust::device_ptr<unsigned int>((unsigned int*)end)); break; } case nvcc::RelationalAlgebraKernel::I64: { typedef thrust::device_ptr<long long unsigned int> ptr; thrust::sort( ptr((long long unsigned int*)begin), ptr((long long unsigned int*)end)); break; } case nvcc::RelationalAlgebraKernel::I128: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<2> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), compare_sort_gpu128()); break; } case nvcc::RelationalAlgebraKernel::I256: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<4> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<4>*)begin), ptr((ra::tuple::PackedNBytes<4>*)end), compare_sort_gpu256()); break; } case nvcc::RelationalAlgebraKernel::I512: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<8> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<8>*)begin), ptr((ra::tuple::PackedNBytes<8>*)end), compare_sort_gpu512()); break; } case nvcc::RelationalAlgebraKernel::I1024: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<16> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<16>*)begin), ptr((ra::tuple::PackedNBytes<16>*)end), compare_sort_gpu1024()); break; } default: { break; } } hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&exe_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("sort %f\n", exe_time); } //void sort2(void* begin, void* end, unsigned long long int type) //{ // hipEvent_t start, stop; // hipEventCreate(&start); hipEventCreate(&stop); // float exe_time = 0.0f; // hipEventRecord(start,0); // // switch(type) // { // case nvcc::RelationalAlgebraKernel::I8: // { // thrust::stable_sort( // thrust::device_ptr<unsigned char>((unsigned char*)begin), // thrust::device_ptr<unsigned 
char>((unsigned char*)end)); // break; // } // case nvcc::RelationalAlgebraKernel::I16: // { // thrust::stable_sort( // thrust::device_ptr<unsigned short>((unsigned short*)begin), // thrust::device_ptr<unsigned short>((unsigned short*)end)); // break; // } // case nvcc::RelationalAlgebraKernel::I32: // { // thrust::stable_sort( // thrust::device_ptr<unsigned int>((unsigned int*)begin), // thrust::device_ptr<unsigned int>((unsigned int*)end)); // break; // } // case nvcc::RelationalAlgebraKernel::I64: // { // typedef thrust::device_ptr<unsigned long long int> ptr; // // thrust::host_vector<unsigned long long int> h_vec0(1500000); // // thrust::copy(ptr((unsigned long long int*)begin), ptr((unsigned long long int*)end), h_vec0.begin()); // // for(int i = 0; i < 1500000; ++i) // { // if (h_vec0[i] == 0) // printf("%d %llx\n", i, h_vec0[i]); // } // // thrust::stable_sort( // ptr((unsigned long long int*)begin), // ptr((unsigned long long int*)end)); // // thrust::host_vector<unsigned long long int> h_vec(1500000); // // thrust::copy(ptr((unsigned long long int*)begin), ptr((unsigned long long int*)end), h_vec.begin()); // // for(int i = (1500000 - 1); i > (1500000 - 5); i--) // printf("***%d %llx\n", i, h_vec[i]); // // break; // } // case nvcc::RelationalAlgebraKernel::I128: // { // typedef thrust::device_ptr<ra::tuple::PackedNBytes<2> > ptr; // // ra::tuple::PackedNBytes<2> data[1]; // // check(hipMemcpy(data, (ra::tuple::PackedNBytes<2> *)begin, 16, // hipMemcpyDeviceToHost)); // // thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec(10000); // thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec.begin()); // // for(int i = 0; i < 10000; i++) // { // ra::tuple::PackedNBytes<2> temp = h_vec[i] >> (unsigned int)14; // unsigned long long int temp2 = temp.a[0]; // double *pointer = (double *)(&temp2); // double temp3 = *pointer; // // if(temp3 == 1772627.25f) printf("%d\n find the data %llx %llx", i, h_vec[i].a[0], 
h_vec[i].a[1]); // } //// printf("%llx, %llx\n", h_vec[0].a[0], h_vec[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // thrust::stable_sort( // ptr((ra::tuple::PackedNBytes<2>*)begin), // ptr((ra::tuple::PackedNBytes<2>*)end), compare_sort_gpu128()); // // ra::tuple::PackedNBytes<2> data2[1]; // // check(hipMemcpy(data2, (ra::tuple::PackedNBytes<2> *)begin, 16, // hipMemcpyDeviceToHost)); // // thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec2(10000); // thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec2.begin()); // // for(int i = 1; i < 10000; i++) // { // ra::tuple::PackedNBytes<2> temp = h_vec2[i] >> (unsigned int)14; // unsigned long long int temp2 = temp.a[0]; // double *pointer = (double *)(&temp2); // double temp3 = *pointer; // // ra::tuple::PackedNBytes<2> temp4 = h_vec2[i - 1] >> (unsigned int)14; // unsigned long long int temp5 = temp4.a[0]; // double *pointer2 = (double *)(&temp5); // double temp6 = *pointer2; // // if(temp3 < temp6) // printf("************after sort wrong %d, %f %llx %llx %f %llx %llx\n", i, temp3, h_vec2[i].a[0], h_vec2[i].a[1], temp6, h_vec2[i-1].a[0], h_vec2[i-1].a[1]); // } // //// //// thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec2(8060); //// thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec2.begin()); //// //// for(int i = 0; i < 8060; i++) //// { //// if(h_vec2[i].a[1] != 0x0 && h_vec2[i].a[1] != 0x1 && h_vec2[i].a[1] != 0x3 && h_vec2[i].a[1] != 0x5) //// printf("after 2nd sort %d, %x\n", i, h_vec2[i].a[1]); //// } // //// printf("%llx, %llx\n", h_vec2[0].a[0], h_vec2[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // break; // } // case nvcc::RelationalAlgebraKernel::I256: // { // typedef thrust::device_ptr<ra::tuple::PackedNBytes<4> > 
ptr; // ////unsigned long long int data[16120]; //// //// check(hipMemcpy(data, (char *)begin, 128960, //// hipMemcpyDeviceToHost)); ////printf("before sort %llx, %llx\n", data[0], data[1]); //// thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec(8060); //// thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec.begin()); //// //// for(int i = 0; i < 8060; i++) //// { //// if(h_vec[i].a[1] != 0x0 && h_vec[i].a[1] != 0x1 && h_vec[i].a[1] != 0x3 && h_vec[i].a[1] != 0x5) //// printf("************before 2nd sort %d, %llx, %llx \n", i, h_vec[i].a[1], h_vec[i].a[0]); //// } //// printf("%llx, %llx\n", h_vec[0].a[0], h_vec[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // thrust::stable_sort( // ptr((ra::tuple::PackedNBytes<4>*)begin), // ptr((ra::tuple::PackedNBytes<4>*)end), compare_sort_gpu256()); // // thrust::host_vector<ra::tuple::PackedNBytes<4> > h_vec2(158960); // thrust::copy(ptr((ra::tuple::PackedNBytes<4>*)begin), ptr((ra::tuple::PackedNBytes<4>*)end), h_vec2.begin()); // // for(int i = 1; i < 158960; i++) // { //// typedef ra::tuple::Tuple<64, 3, 5, 14, 18, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> Tuple; // typedef ra::tuple::Tuple<18, 64, 3, 5, 14, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> Tuple; // unsigned int before = ra::tuple::extract<unsigned int, 0, Tuple>(h_vec2[i - 1]); // unsigned int after = ra::tuple::extract<unsigned int, 0, Tuple>(h_vec2[i]); // // if(before > after) printf("***ERR*** %d %u %u\n", i, before, after); // } // // printf("******after sort\n"); //// printf("%llx, %llx\n", h_vec2[0].a[0], h_vec2[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // break; // } // default: // { // printf("****************sort default********************\n"); // break; // } // } // // hipEventRecord(stop,0); // hipEventSynchronize(stop); // 
hipEventElapsedTime(&exe_time, start, stop); // hipEventDestroy(start); // hipEventDestroy(stop); // printf("sort %f\n", exe_time); //} } #endif
02da84a40d0f8058a3e23ef5948d8e415862f9b1.cu
/*! \file Sort.cu \author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010 \brief The source file for the C interface to CUDA sorting routines. */ #ifndef SORT_CU_INCLUDED #define SORT_CU_INCLUDED // Redfox Includes #include <redfox/nvcc/interface/RelationalAlgebraKernel.h> #include <redfox/ra/interface/Sort.h> #include <redfox/ra/interface/Tuple.h> // Thrust Includes #include <thrust/device_ptr.h> #include <thrust/sort.h> // Hydrazine Includes //#include <hydrazine/interface/debug.h> #include <stdio.h> namespace redfox { void check(cudaError_t status) { if(status != cudaSuccess) { std::cerr << cudaGetErrorString(status) << "\n"; std::abort(); } } struct compare_sort_string { __host__ __device__ bool operator()(unsigned long long int i, unsigned long long int j) { char *string1 = (char *)i; char *string2 = (char *)j; int ii = 0; while(string1[ii] != '\0' && string2[ii] != '\0') { if(string1[ii] != string2[ii]) return (string1[ii] < string2[ii]); ii++; } if(string1[ii] == '\0' && string2[ii] != '\0') return true; else return false; } }; struct compare_sort_gpu128 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<2> i, ra::tuple::PackedNBytes<2> j) { if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu256 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<4> i, ra::tuple::PackedNBytes<4> j) { if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu512 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<8> i, ra::tuple::PackedNBytes<8> j) { if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[3] < j.a[3]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if 
(i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; struct compare_sort_gpu1024 { __host__ __device__ bool operator()(ra::tuple::PackedNBytes<16> i, ra::tuple::PackedNBytes<16> j) { if (i.a[15] != j.a[15]) return (i.a[15] < j.a[15]); if (i.a[14] != j.a[14]) return (i.a[14] < j.a[14]); if (i.a[13] != j.a[13]) return (i.a[13] < j.a[13]); if (i.a[12] != j.a[12]) return (i.a[12] < j.a[12]); if (i.a[11] != j.a[11]) return (i.a[11] < j.a[11]); if (i.a[10] != j.a[10]) return (i.a[10] < j.a[10]); if (i.a[9] != j.a[9]) return (i.a[9] < j.a[9]); if (i.a[8] != j.a[8]) return (i.a[8] < j.a[8]); if (i.a[7] != j.a[7]) return (i.a[7] < j.a[7]); if (i.a[6] != j.a[6]) return (i.a[6] < j.a[6]); if (i.a[5] != j.a[5]) return (i.a[5] < j.a[5]); if (i.a[4] != j.a[4]) return (i.a[4] < j.a[4]); if (i.a[3] != j.a[3]) return (i.a[2] < j.a[2]); if (i.a[2] != j.a[2]) return (i.a[2] < j.a[2]); if (i.a[1] != j.a[1]) return (i.a[1] < j.a[1]); return (i.a[0] < j.a[0]); } }; void sort_string(void* begin, void* end) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); typedef thrust::device_ptr<long long unsigned int> ptr; thrust::sort( ptr((long long unsigned int*)begin), ptr((long long unsigned int*)end), compare_sort_string()); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %f\n", exe_time); } void sort(void* begin, void* end, unsigned long long int type) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float exe_time = 0.0f; cudaEventRecord(start,0); switch(type) { case nvcc::RelationalAlgebraKernel::I8: { thrust::sort( thrust::device_ptr<unsigned char>((unsigned char*)begin), thrust::device_ptr<unsigned char>((unsigned char*)end)); break; } case nvcc::RelationalAlgebraKernel::I16: { thrust::sort( thrust::device_ptr<unsigned short>((unsigned short*)begin), 
thrust::device_ptr<unsigned short>((unsigned short*)end)); break; } case nvcc::RelationalAlgebraKernel::I32: { thrust::sort( thrust::device_ptr<unsigned int>((unsigned int*)begin), thrust::device_ptr<unsigned int>((unsigned int*)end)); break; } case nvcc::RelationalAlgebraKernel::I64: { typedef thrust::device_ptr<long long unsigned int> ptr; thrust::sort( ptr((long long unsigned int*)begin), ptr((long long unsigned int*)end)); break; } case nvcc::RelationalAlgebraKernel::I128: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<2> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), compare_sort_gpu128()); break; } case nvcc::RelationalAlgebraKernel::I256: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<4> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<4>*)begin), ptr((ra::tuple::PackedNBytes<4>*)end), compare_sort_gpu256()); break; } case nvcc::RelationalAlgebraKernel::I512: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<8> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<8>*)begin), ptr((ra::tuple::PackedNBytes<8>*)end), compare_sort_gpu512()); break; } case nvcc::RelationalAlgebraKernel::I1024: { typedef thrust::device_ptr<ra::tuple::PackedNBytes<16> > ptr; thrust::sort( ptr((ra::tuple::PackedNBytes<16>*)begin), ptr((ra::tuple::PackedNBytes<16>*)end), compare_sort_gpu1024()); break; } default: { break; } } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&exe_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("sort %f\n", exe_time); } //void sort2(void* begin, void* end, unsigned long long int type) //{ // cudaEvent_t start, stop; // cudaEventCreate(&start); cudaEventCreate(&stop); // float exe_time = 0.0f; // cudaEventRecord(start,0); // // switch(type) // { // case nvcc::RelationalAlgebraKernel::I8: // { // thrust::stable_sort( // thrust::device_ptr<unsigned char>((unsigned char*)begin), // thrust::device_ptr<unsigned char>((unsigned char*)end)); 
// break; // } // case nvcc::RelationalAlgebraKernel::I16: // { // thrust::stable_sort( // thrust::device_ptr<unsigned short>((unsigned short*)begin), // thrust::device_ptr<unsigned short>((unsigned short*)end)); // break; // } // case nvcc::RelationalAlgebraKernel::I32: // { // thrust::stable_sort( // thrust::device_ptr<unsigned int>((unsigned int*)begin), // thrust::device_ptr<unsigned int>((unsigned int*)end)); // break; // } // case nvcc::RelationalAlgebraKernel::I64: // { // typedef thrust::device_ptr<unsigned long long int> ptr; // // thrust::host_vector<unsigned long long int> h_vec0(1500000); // // thrust::copy(ptr((unsigned long long int*)begin), ptr((unsigned long long int*)end), h_vec0.begin()); // // for(int i = 0; i < 1500000; ++i) // { // if (h_vec0[i] == 0) // printf("%d %llx\n", i, h_vec0[i]); // } // // thrust::stable_sort( // ptr((unsigned long long int*)begin), // ptr((unsigned long long int*)end)); // // thrust::host_vector<unsigned long long int> h_vec(1500000); // // thrust::copy(ptr((unsigned long long int*)begin), ptr((unsigned long long int*)end), h_vec.begin()); // // for(int i = (1500000 - 1); i > (1500000 - 5); i--) // printf("***%d %llx\n", i, h_vec[i]); // // break; // } // case nvcc::RelationalAlgebraKernel::I128: // { // typedef thrust::device_ptr<ra::tuple::PackedNBytes<2> > ptr; // // ra::tuple::PackedNBytes<2> data[1]; // // check(cudaMemcpy(data, (ra::tuple::PackedNBytes<2> *)begin, 16, // cudaMemcpyDeviceToHost)); // // thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec(10000); // thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec.begin()); // // for(int i = 0; i < 10000; i++) // { // ra::tuple::PackedNBytes<2> temp = h_vec[i] >> (unsigned int)14; // unsigned long long int temp2 = temp.a[0]; // double *pointer = (double *)(&temp2); // double temp3 = *pointer; // // if(temp3 == 1772627.25f) printf("%d\n find the data %llx %llx", i, h_vec[i].a[0], h_vec[i].a[1]); // } //// 
printf("%llx, %llx\n", h_vec[0].a[0], h_vec[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // thrust::stable_sort( // ptr((ra::tuple::PackedNBytes<2>*)begin), // ptr((ra::tuple::PackedNBytes<2>*)end), compare_sort_gpu128()); // // ra::tuple::PackedNBytes<2> data2[1]; // // check(cudaMemcpy(data2, (ra::tuple::PackedNBytes<2> *)begin, 16, // cudaMemcpyDeviceToHost)); // // thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec2(10000); // thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec2.begin()); // // for(int i = 1; i < 10000; i++) // { // ra::tuple::PackedNBytes<2> temp = h_vec2[i] >> (unsigned int)14; // unsigned long long int temp2 = temp.a[0]; // double *pointer = (double *)(&temp2); // double temp3 = *pointer; // // ra::tuple::PackedNBytes<2> temp4 = h_vec2[i - 1] >> (unsigned int)14; // unsigned long long int temp5 = temp4.a[0]; // double *pointer2 = (double *)(&temp5); // double temp6 = *pointer2; // // if(temp3 < temp6) // printf("************after sort wrong %d, %f %llx %llx %f %llx %llx\n", i, temp3, h_vec2[i].a[0], h_vec2[i].a[1], temp6, h_vec2[i-1].a[0], h_vec2[i-1].a[1]); // } // //// //// thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec2(8060); //// thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec2.begin()); //// //// for(int i = 0; i < 8060; i++) //// { //// if(h_vec2[i].a[1] != 0x0 && h_vec2[i].a[1] != 0x1 && h_vec2[i].a[1] != 0x3 && h_vec2[i].a[1] != 0x5) //// printf("after 2nd sort %d, %x\n", i, h_vec2[i].a[1]); //// } // //// printf("%llx, %llx\n", h_vec2[0].a[0], h_vec2[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // break; // } // case nvcc::RelationalAlgebraKernel::I256: // { // typedef thrust::device_ptr<ra::tuple::PackedNBytes<4> > ptr; // ////unsigned long 
long int data[16120]; //// //// check(cudaMemcpy(data, (char *)begin, 128960, //// cudaMemcpyDeviceToHost)); ////printf("before sort %llx, %llx\n", data[0], data[1]); //// thrust::host_vector<ra::tuple::PackedNBytes<2> > h_vec(8060); //// thrust::copy(ptr((ra::tuple::PackedNBytes<2>*)begin), ptr((ra::tuple::PackedNBytes<2>*)end), h_vec.begin()); //// //// for(int i = 0; i < 8060; i++) //// { //// if(h_vec[i].a[1] != 0x0 && h_vec[i].a[1] != 0x1 && h_vec[i].a[1] != 0x3 && h_vec[i].a[1] != 0x5) //// printf("************before 2nd sort %d, %llx, %llx \n", i, h_vec[i].a[1], h_vec[i].a[0]); //// } //// printf("%llx, %llx\n", h_vec[0].a[0], h_vec[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // thrust::stable_sort( // ptr((ra::tuple::PackedNBytes<4>*)begin), // ptr((ra::tuple::PackedNBytes<4>*)end), compare_sort_gpu256()); // // thrust::host_vector<ra::tuple::PackedNBytes<4> > h_vec2(158960); // thrust::copy(ptr((ra::tuple::PackedNBytes<4>*)begin), ptr((ra::tuple::PackedNBytes<4>*)end), h_vec2.begin()); // // for(int i = 1; i < 158960; i++) // { //// typedef ra::tuple::Tuple<64, 3, 5, 14, 18, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> Tuple; // typedef ra::tuple::Tuple<18, 64, 3, 5, 14, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0> Tuple; // unsigned int before = ra::tuple::extract<unsigned int, 0, Tuple>(h_vec2[i - 1]); // unsigned int after = ra::tuple::extract<unsigned int, 0, Tuple>(h_vec2[i]); // // if(before > after) printf("***ERR*** %d %u %u\n", i, before, after); // } // // printf("******after sort\n"); //// printf("%llx, %llx\n", h_vec2[0].a[0], h_vec2[0].a[1]); //// printf("%llx, %llx\n", h_vec[1].a[0], h_vec[1].a[1]); //// printf("%llx, %llx\n", h_vec[8059].a[0], h_vec[8059].a[1]); // // break; // } // default: // { // printf("****************sort default********************\n"); // break; // } // } // // cudaEventRecord(stop,0); // cudaEventSynchronize(stop); // 
cudaEventElapsedTime(&exe_time, start, stop); // cudaEventDestroy(start); // cudaEventDestroy(stop); // printf("sort %f\n", exe_time); //} } #endif
23ab04f438ef7dc9fdd669793f85292a42262fa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "softmax.h" Softmax::Softmax(nn::shape_3d input_shape) { this->type = SOFTMAX_LAYER_TYPE; this->input_shape = input_shape; this->output_shape = input_shape; this->output_fdim = output_shape.width * output_shape.height * output_shape.channel; printf("+ Softmax layer, output_shape=(%d,%d,%d)\n", output_shape.width, output_shape.height, output_shape.channel); hipMalloc(&expSum, sizeof(float)); hipMalloc(&loss, sizeof(float)); hipMalloc(&output, sizeof(float) * output_fdim); } Softmax::~Softmax() { hipFree(expSum); hipFree(loss); hipFree(output); }; void Softmax::feed(int label) { this->label = label; } float Softmax::computeLoss() { // fetch probability float prob[output_fdim]; hipMemcpy(prob, output, sizeof(float) * output_fdim, hipMemcpyDeviceToHost); return -log(prob[label]); } /* Forward */ void Softmax::forward() { hipLaunchKernelGGL(( softmax::forwardSum), dim3(1),dim3(output_fdim), 0, 0, expSum, prev_output, output_fdim); hipLaunchKernelGGL(( softmax::forwardNorm), dim3(1),dim3(output_fdim), 0, 0, output, prev_output, expSum, output_fdim); } __global__ void softmax::forwardSum(float *expSum, float *prev_output, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { float expLogit = exp(prev_output[tid]); atomicAdd(expSum, expLogit); } } __global__ void softmax::forwardNorm(float *output, float *prev_output, float *expSum, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { output[tid] = exp(prev_output[tid])/(*expSum+EPSILON); } } /* Backward */ void Softmax::backward() { hipLaunchKernelGGL(( softmax::backward), dim3(1),dim3(output_fdim), 0, 0, prev_d_output, output, label, output_fdim); } __global__ void softmax::backward(float *prev_d_output, float *output, int label, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { prev_d_output[tid] = ((label==tid ? 
-1.0f : 0.0f) + output[tid]); } } void Softmax::clear() { hipMemset(loss, 0x00, sizeof(float)); hipMemset(expSum, 0x00, sizeof(float)); hipMemset(output, 0x00, sizeof(float)*output_fdim); };
23ab04f438ef7dc9fdd669793f85292a42262fa2.cu
#include "softmax.h" Softmax::Softmax(nn::shape_3d input_shape) { this->type = SOFTMAX_LAYER_TYPE; this->input_shape = input_shape; this->output_shape = input_shape; this->output_fdim = output_shape.width * output_shape.height * output_shape.channel; printf("+ Softmax layer, output_shape=(%d,%d,%d)\n", output_shape.width, output_shape.height, output_shape.channel); cudaMalloc(&expSum, sizeof(float)); cudaMalloc(&loss, sizeof(float)); cudaMalloc(&output, sizeof(float) * output_fdim); } Softmax::~Softmax() { cudaFree(expSum); cudaFree(loss); cudaFree(output); }; void Softmax::feed(int label) { this->label = label; } float Softmax::computeLoss() { // fetch probability float prob[output_fdim]; cudaMemcpy(prob, output, sizeof(float) * output_fdim, cudaMemcpyDeviceToHost); return -log(prob[label]); } /* Forward */ void Softmax::forward() { softmax::forwardSum<<<1,output_fdim>>>(expSum, prev_output, output_fdim); softmax::forwardNorm<<<1,output_fdim>>>(output, prev_output, expSum, output_fdim); } __global__ void softmax::forwardSum(float *expSum, float *prev_output, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { float expLogit = exp(prev_output[tid]); atomicAdd(expSum, expLogit); } } __global__ void softmax::forwardNorm(float *output, float *prev_output, float *expSum, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { output[tid] = exp(prev_output[tid])/(*expSum+EPSILON); } } /* Backward */ void Softmax::backward() { softmax::backward<<<1,output_fdim>>>(prev_d_output, output, label, output_fdim); } __global__ void softmax::backward(float *prev_d_output, float *output, int label, int output_fdim) { int tid = threadIdx.x; if(tid < output_fdim) { prev_d_output[tid] = ((label==tid ? -1.0f : 0.0f) + output[tid]); } } void Softmax::clear() { cudaMemset(loss, 0x00, sizeof(float)); cudaMemset(expSum, 0x00, sizeof(float)); cudaMemset(output, 0x00, sizeof(float)*output_fdim); };
661452f183f08c8119edb900c1c84620c4b30605.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt384(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt385(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt386(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt387(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt388(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt389(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt390(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt391(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt392(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt393(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt394(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt395(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt396(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 
20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt397(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt398(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt399(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt400(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), 
dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt401(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt402(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt403(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt404(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt405(const size_t num_blocks, const size_t 
threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt406(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt407(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt408(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt409(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 
20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt410(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt411(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt412(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt413(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 52, 51, 36, 37, 54, 
55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt414(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt415(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt416(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt417(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt418(const size_t num_blocks, 
const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt419(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt420(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt421(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt422(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 
2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt423(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt424(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt425(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt426(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 36, 51, 
36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt427(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt428(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt429(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt430(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt431(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt432(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt433(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt434(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt435(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( 
des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt436(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt437(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt438(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt439(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 
32, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt440(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt441(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt442(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt443(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void 
des_25_encrypt_salt444(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt445(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt446(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt447(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt448(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { 
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt449(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt450(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt451(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt452(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 36, 51, 
52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt453(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt454(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt455(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt456(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, 
bitsplitted_keys); } void des_25_encrypt_salt457(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt458(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt459(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt460(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt461(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* 
const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt462(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt463(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt464(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt465(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 
32, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt466(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt467(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt468(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt469(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt470(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt471(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt472(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt473(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt474(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt475(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt476(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt477(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt478(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 
3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt479(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt480(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt481(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt482(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), 
dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt483(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt484(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt485(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt486(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt487(const size_t num_blocks, const size_t 
threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt488(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt489(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt490(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt491(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 
20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt492(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt493(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt494(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt495(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 36, 35, 36, 37, 54, 55, 
56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt496(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt497(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt498(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt499(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt500(const size_t num_blocks, 
const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt501(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt502(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt503(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt504(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 
19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt505(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt506(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt507(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt508(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 36, 35, 36, 
37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt509(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt510(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt511(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
661452f183f08c8119edb900c1c84620c4b30605.cu
#include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt384(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt385(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt386(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt387(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt388(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 
21, 6, 7, 8, 15, 16, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt389(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt390(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt391(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt392(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt393(const size_t num_blocks, const size_t 
threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt394(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt395(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt396(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt397(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt398(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt399(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt400(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt401(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt402(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 20, 19, 4, 
5, 22, 23, 24, 63, 48, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt403(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt404(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt405(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt406(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt407(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt408(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt409(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt410(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt411(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt412(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt413(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt414(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt415(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 20, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 36, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 52, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt416(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 4, 19, 4, 
5, 22, 23, 24, 63, 32, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt417(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt418(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt419(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt420(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt421(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt422(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt423(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt424(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt425(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt426(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt427(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt428(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt429(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt430(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 4, 19, 4, 
5, 22, 23, 24, 63, 48, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt431(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt432(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt433(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt434(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt435(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt436(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt437(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt438(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt439(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt440(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt441(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt442(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt443(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt444(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 4, 19, 4, 
5, 22, 23, 24, 63, 32, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt445(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt446(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt447(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 4, 19, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 52, 35, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 36, 51, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt448(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt449(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt450(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt451(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt452(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt453(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt454(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt455(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt456(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt457(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt458(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 20, 3, 4, 
5, 22, 23, 24, 63, 48, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt459(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt460(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt461(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt462(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt463(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt464(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt465(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt466(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt467(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt468(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt469(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt470(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt471(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt472(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 20, 3, 4, 
5, 22, 23, 24, 63, 32, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt473(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt474(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt475(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt476(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt477(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt478(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt479(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 20, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 36, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 52, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt480(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt481(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt482(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt483(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt484(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt485(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt486(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 19, 4, 3, 4, 
5, 22, 23, 24, 63, 48, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt487(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt488(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt489(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt490(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt491(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt492(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt493(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt494(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt495(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 19, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 35, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 51, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt496(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt497(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt498(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt499(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt500(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 18, 3, 4, 3, 4, 
5, 22, 23, 24, 63, 32, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt501(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt502(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt503(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 18, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 34, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 50, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt504(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt505(const size_t num_blocks, const size_t threads_per_block, vtype* const 
unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt506(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt507(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 17, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 33, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 49, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt508(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 16, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 32, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 48, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt509(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 16, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 32, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 48, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, 
threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt510(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 15, 0, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 63, 48, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 47, 32, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt511(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 21, 6, 7, 8, 31, 0, 1, 2, 3, 4, 3, 4, 5, 22, 23, 24, 47, 48, 49, 50, 51, 52, 51, 52, 53, 38, 39, 40, 63, 32, 33, 34, 35, 36, 35, 36, 37, 54, 55, 56><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
bdd513f9297c1f74f4bedef43ba0bc5f1518bcbe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "conv.cuh" #include <iostream> void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); getchar(); exit( EXIT_FAILURE ); } } void CheckError(void) { #ifdef _DEBUG_PRINTS_ hipDeviceSynchronize(); HANDLE_ERROR( hipPeekAtLastError() ); #endif } __constant__ float d_kernel[81]; __inline__ __device__ uchar4 getRGBA(const uchar* fSource, const int fImageWidth, const int fImageHeight, const int row, const int col) { uchar4 retVal; int ii = row<0 ? 0 : row; int jj = col>=fImageHeight ? fImageHeight-1 : col; int indx= 4*(jj*fImageWidth + ii); retVal.x= fSource[indx+2]; retVal.y= fSource[indx+1]; retVal.z= fSource[indx+0]; retVal.w= fSource[indx+3]; return retVal; } __inline__ __device__ void setRGBA(uchar* fDestination, const int fImageWidth, const int fImageHeight, const int row, const int col, float4 value) { int ii = row<0 ? 0 : row; int jj = col>=fImageHeight ? 
fImageHeight-1 : col; int indx= 4*(jj*fImageWidth + ii); fDestination[indx+2] = value.x; fDestination[indx+1] = value.y; fDestination[indx+0] = value.z; fDestination[indx+3] = value.w; } __global__ void convolveKernel(const uchar* fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize) { extern __shared__ uchar shared[]; const int PADDING = 2*fKernelSize; int slen = blockDim.x+PADDING; int klen = 2*fKernelSize+1; int gx = threadIdx.x + blockDim.x * blockIdx.x; int gy = threadIdx.y + blockDim.y * blockIdx.y; int sidx = 4*(threadIdx.y*slen+threadIdx.x); uchar4 pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx-fKernelSize,gy-fKernelSize); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; int ti = threadIdx.x + fKernelSize; int tj = threadIdx.y + fKernelSize; int lx2 = threadIdx.x + blockDim.x; int ly2 = threadIdx.y + blockDim.y; int gx2 = gx + blockDim.x; int gy2 = gy + blockDim.y; if( threadIdx.x < PADDING ) { pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx2-fKernelSize,gy-fKernelSize); sidx= 4*(threadIdx.y*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.y < PADDING ) { pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+threadIdx.x); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.x < PADDING && threadIdx.y < PADDING ) { pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx2-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } __syncthreads(); // Now that the image has been read // into shared memory completely. 
// Check for image bounds and exit if( gx >= fImageWidth || gy >= fImageHeight ) return; sidx = 4*(tj*slen+ti); uchar* ptr = shared + sidx; float4 accum = {0.0f,0.0f,0.0f,0.0f}; for( int jj=-fKernelSize; jj<=fKernelSize; jj++ ) { for( int ii=-fKernelSize; ii<=fKernelSize; ii++ ) { int tmpidx = 4*(jj*slen+ii); float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)]; accum.x += weight*ptr[tmpidx+0]; accum.y += weight*ptr[tmpidx+1]; accum.z += weight*ptr[tmpidx+2]; } } accum.w = shared[sidx+3]; setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum); } __global__ void convolveKernel(hipTextureObject_t fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize) { extern __shared__ uchar shared[]; const int PADDING = 2*fKernelSize; int slen = blockDim.x + PADDING; int klen = PADDING + 1; int gx = threadIdx.x + blockDim.x * blockIdx.x; int gy = threadIdx.y + blockDim.y * blockIdx.y; int sidx = 4*(threadIdx.y*slen+threadIdx.x); uchar4 pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy-fKernelSize); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; int ti = threadIdx.x + fKernelSize; int tj = threadIdx.y + fKernelSize; int lx2 = threadIdx.x + blockDim.x; int ly2 = threadIdx.y + blockDim.y; int gx2 = gx + blockDim.x; int gy2 = gy + blockDim.y; if( threadIdx.x < PADDING ) { pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy-fKernelSize); sidx= 4*(threadIdx.y*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.y < PADDING ) { pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+threadIdx.x); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.x < PADDING && threadIdx.y < PADDING ) { pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = 
pxl.w; } __syncthreads(); // Now that the image has been read // into shared memory completely. // Check for image bounds and exit if( gx >= fImageWidth || gy >= fImageHeight ) return; sidx = 4*(tj*slen+ti); uchar* ptr = shared + sidx; float4 accum = {0.0f,0.0f,0.0f,0.0f}; for( int jj=-fKernelSize; jj<=fKernelSize; jj++ ) { for( int ii=-fKernelSize; ii<=fKernelSize; ii++ ) { int tmpidx = 4*(jj*slen+ii); float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)]; accum.x += weight*ptr[tmpidx+0]; accum.y += weight*ptr[tmpidx+1]; accum.z += weight*ptr[tmpidx+2]; } } accum.w = shared[sidx+3]; setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum); } int ceil(int numer, int denom) { return (numer/denom + (numer % denom != 0)); } MemObject::MemObject() { dev_SourceImage = 0; dev_ConvolvedImage = 0; cuImgArray = 0; // CUDA texture specification memset(&resDesc,0,sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; // CUDA texture object parameters memset(&texDesc,0,sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeWrap; texDesc.addressMode[1] = hipAddressModeWrap; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords= 0; texObj = 0; } void MemObject::cleanMemory() { #if USE_CUDA_TEX_OBJECT if (texObj) { hipDestroyTextureObject(texObj); } if (cuImgArray) { hipFreeArray(cuImgArray); } cuImgArray = 0; texObj = 0; #else if (dev_SourceImage) { hipFree(dev_SourceImage); } #endif if (dev_ConvolvedImage) { hipFree(dev_ConvolvedImage); } dev_SourceImage = 0; dev_ConvolvedImage = 0; } MemObject::~MemObject() { cleanMemory(); } MemObject* getMemObject(void) { static MemObject* handle = 0; if( handle == 0 ) { handle = new MemObject(); } return handle; } void initMemObject(void) { getMemObject(); } void setKernelOnDevice(float const * elements, const int count) { HANDLE_ERROR( hipMemcpyToSymbol(d_kernel, elements, count*sizeof(float)) ); CHECK_CUDA_ERRORS(); } void setImageOnDevice(const uchar * image_data, 
const int image_width, const int image_height) { MemObject* handle = getMemObject(); handle->cleanMemory(); handle->mImageWidth = image_width; handle->mImageHeight = image_height; #if USE_CUDA_TEX_OBJECT handle->channelDesc = hipCreateChannelDesc<uchar4>(); HANDLE_ERROR( hipMallocArray(&(handle->cuImgArray), &(handle->channelDesc), image_width, image_height) ); HANDLE_ERROR( hipMemcpyToArray(handle->cuImgArray, 0,0, image_data, image_width*image_height*4*sizeof(uchar), hipMemcpyHostToDevice) ); handle->resDesc.res.array.array = handle->cuImgArray; hipCreateTextureObject(&(handle->texObj),&(handle->resDesc),&(handle->texDesc),NULL); #else /* Allocate memory on device to hold image data */ HANDLE_ERROR( hipMalloc((void**)&handle->dev_SourceImage, image_width*image_height*4*sizeof(uchar)) ); CHECK_CUDA_ERRORS(); /* Copy this data to device memory for kernel computation */ HANDLE_ERROR( hipMemcpy( handle->dev_SourceImage, image_data, image_width*image_height*4*sizeof(uchar), hipMemcpyHostToDevice) ); CHECK_CUDA_ERRORS(); #endif /* Allocate memory for output image on GPU device memory */ HANDLE_ERROR( hipMalloc((void**)&handle->dev_ConvolvedImage, image_width*image_height*4*sizeof(uchar)) ); CHECK_CUDA_ERRORS(); } void convolve(const int kernel_radius) { static dim3 mThreadsPerBlock(TILE_WIDTH,TILE_HEIGHT); MemObject* handle = getMemObject(); int image_width = handle->mImageWidth; int image_height = handle->mImageHeight; dim3 mGrid(ceil(image_width, mThreadsPerBlock.x), ceil(image_height, mThreadsPerBlock.y)); int sharedMemSize = (mThreadsPerBlock.y+2*kernel_radius)* (mThreadsPerBlock.x+2*kernel_radius)* 4*sizeof(uchar); #ifdef _DEBUG_PRINTS_ std::cout<<"Threads per block "<<mThreadsPerBlock.x<<","<<mThreadsPerBlock.y<<std::endl; std::cout<<"Blocks per grid "<<mGrid.x<<","<<mGrid.y<<std::endl;; std::cout<<"Shared memory usage : "<<sharedMemSize<<" Bytes"<<std::endl;; #endif #if USE_CUDA_TEX_OBJECT hipLaunchKernelGGL(( convolveKernel), 
dim3(mGrid),dim3(mThreadsPerBlock),sharedMemSize, 0, handle->texObj, image_width, image_height, handle->dev_ConvolvedImage, kernel_radius); #else hipLaunchKernelGGL(( convolveKernel), dim3(mGrid),dim3(mThreadsPerBlock),sharedMemSize, 0, handle->dev_SourceImage, image_width, image_height, handle->dev_ConvolvedImage, kernel_radius); #endif CHECK_CUDA_ERRORS(); hipDeviceSynchronize(); } void memCpyImageDeviceToHost(uchar* host_ptr) { MemObject* handle = getMemObject(); HANDLE_ERROR( hipMemcpy(host_ptr, handle->dev_ConvolvedImage, handle->mImageWidth*handle->mImageHeight*4*sizeof(uchar), hipMemcpyDeviceToHost) ); CHECK_CUDA_ERRORS(); }
bdd513f9297c1f74f4bedef43ba0bc5f1518bcbe.cu
#include "conv.cuh" #include <iostream> void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); getchar(); exit( EXIT_FAILURE ); } } void CheckError(void) { #ifdef _DEBUG_PRINTS_ cudaDeviceSynchronize(); HANDLE_ERROR( cudaPeekAtLastError() ); #endif } __constant__ float d_kernel[81]; __inline__ __device__ uchar4 getRGBA(const uchar* fSource, const int fImageWidth, const int fImageHeight, const int row, const int col) { uchar4 retVal; int ii = row<0 ? 0 : row; int jj = col>=fImageHeight ? fImageHeight-1 : col; int indx= 4*(jj*fImageWidth + ii); retVal.x= fSource[indx+2]; retVal.y= fSource[indx+1]; retVal.z= fSource[indx+0]; retVal.w= fSource[indx+3]; return retVal; } __inline__ __device__ void setRGBA(uchar* fDestination, const int fImageWidth, const int fImageHeight, const int row, const int col, float4 value) { int ii = row<0 ? 0 : row; int jj = col>=fImageHeight ? fImageHeight-1 : col; int indx= 4*(jj*fImageWidth + ii); fDestination[indx+2] = value.x; fDestination[indx+1] = value.y; fDestination[indx+0] = value.z; fDestination[indx+3] = value.w; } __global__ void convolveKernel(const uchar* fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize) { extern __shared__ uchar shared[]; const int PADDING = 2*fKernelSize; int slen = blockDim.x+PADDING; int klen = 2*fKernelSize+1; int gx = threadIdx.x + blockDim.x * blockIdx.x; int gy = threadIdx.y + blockDim.y * blockIdx.y; int sidx = 4*(threadIdx.y*slen+threadIdx.x); uchar4 pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx-fKernelSize,gy-fKernelSize); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; int ti = threadIdx.x + fKernelSize; int tj = threadIdx.y + fKernelSize; int lx2 = threadIdx.x + blockDim.x; int ly2 = threadIdx.y + blockDim.y; int gx2 = gx + blockDim.x; int gy2 = gy + blockDim.y; if( threadIdx.x < PADDING ) { pxl = 
getRGBA(fSource,fImageWidth,fImageHeight, gx2-fKernelSize,gy-fKernelSize); sidx= 4*(threadIdx.y*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.y < PADDING ) { pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+threadIdx.x); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.x < PADDING && threadIdx.y < PADDING ) { pxl = getRGBA(fSource,fImageWidth,fImageHeight, gx2-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } __syncthreads(); // Now that the image has been read // into shared memory completely. // Check for image bounds and exit if( gx >= fImageWidth || gy >= fImageHeight ) return; sidx = 4*(tj*slen+ti); uchar* ptr = shared + sidx; float4 accum = {0.0f,0.0f,0.0f,0.0f}; for( int jj=-fKernelSize; jj<=fKernelSize; jj++ ) { for( int ii=-fKernelSize; ii<=fKernelSize; ii++ ) { int tmpidx = 4*(jj*slen+ii); float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)]; accum.x += weight*ptr[tmpidx+0]; accum.y += weight*ptr[tmpidx+1]; accum.z += weight*ptr[tmpidx+2]; } } accum.w = shared[sidx+3]; setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum); } __global__ void convolveKernel(cudaTextureObject_t fSource, int fImageWidth, int fImageHeight, uchar* fDestination, int fKernelSize) { extern __shared__ uchar shared[]; const int PADDING = 2*fKernelSize; int slen = blockDim.x + PADDING; int klen = PADDING + 1; int gx = threadIdx.x + blockDim.x * blockIdx.x; int gy = threadIdx.y + blockDim.y * blockIdx.y; int sidx = 4*(threadIdx.y*slen+threadIdx.x); uchar4 pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy-fKernelSize); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; int ti = threadIdx.x + fKernelSize; int tj = threadIdx.y + fKernelSize; int 
lx2 = threadIdx.x + blockDim.x; int ly2 = threadIdx.y + blockDim.y; int gx2 = gx + blockDim.x; int gy2 = gy + blockDim.y; if( threadIdx.x < PADDING ) { pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy-fKernelSize); sidx= 4*(threadIdx.y*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.y < PADDING ) { pxl = tex2D<uchar4>(fSource,gx-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+threadIdx.x); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } if( threadIdx.x < PADDING && threadIdx.y < PADDING ) { pxl = tex2D<uchar4>(fSource,gx2-fKernelSize,gy2-fKernelSize); sidx= 4*(ly2*slen+lx2); shared[sidx+0] = pxl.x; shared[sidx+1] = pxl.y; shared[sidx+2] = pxl.z; shared[sidx+3] = pxl.w; } __syncthreads(); // Now that the image has been read // into shared memory completely. // Check for image bounds and exit if( gx >= fImageWidth || gy >= fImageHeight ) return; sidx = 4*(tj*slen+ti); uchar* ptr = shared + sidx; float4 accum = {0.0f,0.0f,0.0f,0.0f}; for( int jj=-fKernelSize; jj<=fKernelSize; jj++ ) { for( int ii=-fKernelSize; ii<=fKernelSize; ii++ ) { int tmpidx = 4*(jj*slen+ii); float weight= d_kernel[(fKernelSize+jj)*klen+(fKernelSize+ii)]; accum.x += weight*ptr[tmpidx+0]; accum.y += weight*ptr[tmpidx+1]; accum.z += weight*ptr[tmpidx+2]; } } accum.w = shared[sidx+3]; setRGBA(fDestination,fImageWidth,fImageHeight,gx,gy,accum); } int ceil(int numer, int denom) { return (numer/denom + (numer % denom != 0)); } MemObject::MemObject() { dev_SourceImage = 0; dev_ConvolvedImage = 0; cuImgArray = 0; // CUDA texture specification memset(&resDesc,0,sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; // CUDA texture object parameters memset(&texDesc,0,sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeWrap; texDesc.addressMode[1] = cudaAddressModeWrap; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; 
texDesc.normalizedCoords= 0; texObj = 0; } void MemObject::cleanMemory() { #if USE_CUDA_TEX_OBJECT if (texObj) { cudaDestroyTextureObject(texObj); } if (cuImgArray) { cudaFreeArray(cuImgArray); } cuImgArray = 0; texObj = 0; #else if (dev_SourceImage) { cudaFree(dev_SourceImage); } #endif if (dev_ConvolvedImage) { cudaFree(dev_ConvolvedImage); } dev_SourceImage = 0; dev_ConvolvedImage = 0; } MemObject::~MemObject() { cleanMemory(); } MemObject* getMemObject(void) { static MemObject* handle = 0; if( handle == 0 ) { handle = new MemObject(); } return handle; } void initMemObject(void) { getMemObject(); } void setKernelOnDevice(float const * elements, const int count) { HANDLE_ERROR( cudaMemcpyToSymbol(d_kernel, elements, count*sizeof(float)) ); CHECK_CUDA_ERRORS(); } void setImageOnDevice(const uchar * image_data, const int image_width, const int image_height) { MemObject* handle = getMemObject(); handle->cleanMemory(); handle->mImageWidth = image_width; handle->mImageHeight = image_height; #if USE_CUDA_TEX_OBJECT handle->channelDesc = cudaCreateChannelDesc<uchar4>(); HANDLE_ERROR( cudaMallocArray(&(handle->cuImgArray), &(handle->channelDesc), image_width, image_height) ); HANDLE_ERROR( cudaMemcpyToArray(handle->cuImgArray, 0,0, image_data, image_width*image_height*4*sizeof(uchar), cudaMemcpyHostToDevice) ); handle->resDesc.res.array.array = handle->cuImgArray; cudaCreateTextureObject(&(handle->texObj),&(handle->resDesc),&(handle->texDesc),NULL); #else /* Allocate memory on device to hold image data */ HANDLE_ERROR( cudaMalloc((void**)&handle->dev_SourceImage, image_width*image_height*4*sizeof(uchar)) ); CHECK_CUDA_ERRORS(); /* Copy this data to device memory for kernel computation */ HANDLE_ERROR( cudaMemcpy( handle->dev_SourceImage, image_data, image_width*image_height*4*sizeof(uchar), cudaMemcpyHostToDevice) ); CHECK_CUDA_ERRORS(); #endif /* Allocate memory for output image on GPU device memory */ HANDLE_ERROR( cudaMalloc((void**)&handle->dev_ConvolvedImage, 
image_width*image_height*4*sizeof(uchar)) ); CHECK_CUDA_ERRORS(); } void convolve(const int kernel_radius) { static dim3 mThreadsPerBlock(TILE_WIDTH,TILE_HEIGHT); MemObject* handle = getMemObject(); int image_width = handle->mImageWidth; int image_height = handle->mImageHeight; dim3 mGrid(ceil(image_width, mThreadsPerBlock.x), ceil(image_height, mThreadsPerBlock.y)); int sharedMemSize = (mThreadsPerBlock.y+2*kernel_radius)* (mThreadsPerBlock.x+2*kernel_radius)* 4*sizeof(uchar); #ifdef _DEBUG_PRINTS_ std::cout<<"Threads per block "<<mThreadsPerBlock.x<<","<<mThreadsPerBlock.y<<std::endl; std::cout<<"Blocks per grid "<<mGrid.x<<","<<mGrid.y<<std::endl;; std::cout<<"Shared memory usage : "<<sharedMemSize<<" Bytes"<<std::endl;; #endif #if USE_CUDA_TEX_OBJECT convolveKernel<<<mGrid,mThreadsPerBlock,sharedMemSize>>>(handle->texObj, image_width, image_height, handle->dev_ConvolvedImage, kernel_radius); #else convolveKernel<<<mGrid,mThreadsPerBlock,sharedMemSize>>>(handle->dev_SourceImage, image_width, image_height, handle->dev_ConvolvedImage, kernel_radius); #endif CHECK_CUDA_ERRORS(); cudaDeviceSynchronize(); } void memCpyImageDeviceToHost(uchar* host_ptr) { MemObject* handle = getMemObject(); HANDLE_ERROR( cudaMemcpy(host_ptr, handle->dev_ConvolvedImage, handle->mImageWidth*handle->mImageHeight*4*sizeof(uchar), cudaMemcpyDeviceToHost) ); CHECK_CUDA_ERRORS(); }
96396539ab21384b2a9b61cded1357d69371089f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <gaussian_elimination.h> #include <basic_types.h> #include <texture.h> #include <util.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #ifdef _WIN32 #pragma warning (pop) #endif namespace amgx { // Kernel to invert 4by4 matrix on device using one thread // TODO: possible optimizations using multiple threads, possible generalisation for NxN blocks template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void gaussianElimination4by4Kernel(const IndexType *dia_indices, const ValueTypeA *values, ValueTypeB *x, const ValueTypeB *b) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; ValueTypeB Atemp[BSIZE][BSIZE]; ValueTypeB xtemp[BSIZE]; ValueTypeB btemp[BSIZE]; int offset; if (tid == 0) { #pragma unroll for (int m = 0; m < BSIZE; m++) { btemp[m] = b[m]; } offset = 0; #pragma unroll for (int m = 0; m < BSIZE; m++) #pragma unroll for (int n = 0; n < BSIZE; n++) { types::util<ValueTypeA>::to_uptype(values[offset++], Atemp[m][n]); // only one nonzero block - diagonal } gaussianElimination4by4(Atemp, xtemp, btemp); // Store x #pragma unroll for (int m = 0; m < BSIZE; m++) { x[m] = xtemp[m]; } } } // Method to perform gaussian elimination for a matrix stored in row major format template<typename IndexType, typename ValueTypeA, typename ValueTypeB> void gaussianEliminationRowMajor(ValueTypeA **e, ValueTypeB *x, ValueTypeB *b, const IndexType bsize) { if (bsize > 4) { FatalError("Warning, Gaussian Elimination code doesn't pivot and bsize > 4", AMGX_ERR_BAD_PARAMETERS); } ValueTypeB pivot, ratio, tmp; for (int j = 0; j < bsize; j++) { types::util<ValueTypeA>::to_uptype(e[j][j], pivot); for (int k = j + 1; k < bsize; k++) { ValueTypeB temp; types::util<ValueTypeA>::to_uptype(e[k][j], temp); ratio = temp / pivot; for (int m = j + 1; m < bsize; m++) { e[k][m] = e[k][m] - e[j][m] * ratio; } b[k] = b[k] - b[j] * ratio; } } // back substitution for (int j = bsize - 1; j >= 0; j--) { tmp = 
types::util<ValueTypeB>::get_zero(); for (int k = j + 1; k < bsize; k++) { tmp = tmp + e[j][k] * x[k]; } x[j] = (b[j] - tmp) / e[j][j]; } } // Method to perform gaussian elimination on matrix stored in block_dia_csr_matrix_h template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination_4x4_host(const Matrix_h &A, Vector_h &x, const Vector_h &b) { typedef typename Matrix_h::value_type ValueTypeA; typedef typename Vector_h::value_type ValueTypeB; if (A.get_num_rows() != 1) { FatalError("Haven't implemented gaussian elimination solver for num_blocks != 1", AMGX_ERR_BAD_PARAMETERS); } else { int bsize = A.get_block_dimy(); // Allocate space for block_matrix ValueTypeA **Atemp = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { Atemp[i] = new ValueTypeA[bsize]; } ValueTypeB *btemp = new ValueTypeB[bsize]; ValueTypeB *xtemp = new ValueTypeB[bsize]; // Copy matrix and rhs for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { Atemp[m][n] = A.values[bsize * m + n]; //diag[bsize*m + n]; since we have 1 nonzero only } btemp[m] = b[m]; } ValueTypeB *x_ptr = x.raw(); gaussianEliminationRowMajor(Atemp, x_ptr, btemp, bsize); } } // Method to perform gaussian elimination on matrix stored in block_dia_csr_matrix_d template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination_4x4_device(const Matrix_d &A, Vector_d &x, const Vector_d &b) { typedef typename Matrix_d::value_type ValueTypeA; typedef typename Vector_d::value_type ValueTypeB; typedef typename Matrix_d::index_type IndexType; if (A.get_num_rows() != 1) { FatalError("Haven't implemented gaussian elimination solver for num_blocks != 1 or block_size != 4", AMGX_ERR_BAD_PARAMETERS); } else { const IndexType *dia_ptr = 
A.diag.raw(); const ValueTypeA *values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); ValueTypeB *x_ptr = x.raw(); hipLaunchKernelGGL(( gaussianElimination4by4Kernel) , dim3(1), dim3(1), 0, 0, dia_ptr, values_ptr, x_ptr, b_ptr); cudaCheckError(); } } template <class TConfig> void GaussianElimination<TConfig>::gaussianElimination(const Matrix<TConfig> &A, Vector<TConfig> &x, const Vector<TConfig> &b) { FatalError("Matrix format not supported in gaussian_elimination", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination(const Matrix_h &A, Vector_h &x, const Vector_h &b) { if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { gaussianElimination_4x4_host(A, x, b); } else { FatalError("gaussElimination: Blocksize is unsupported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination(const Matrix_d &A, Vector_d &x, const Vector_d &b) { if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { gaussianElimination_4x4_device(A, x, b); } else { FatalError("gaussElimination: Blocksize is unsupported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } // ------------------------------- // Explict instantiations // ------------------------------- #define AMGX_CASE_LINE(CASE) template class GaussianElimination<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
96396539ab21384b2a9b61cded1357d69371089f.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <gaussian_elimination.h> #include <basic_types.h> #include <texture.h> #include <util.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #ifdef _WIN32 #pragma warning (pop) #endif namespace amgx { // Kernel to invert 4by4 matrix on device using one thread // TODO: possible optimizations using multiple threads, possible generalisation for NxN blocks template <typename IndexType, typename ValueTypeA, typename ValueTypeB> __global__ void gaussianElimination4by4Kernel(const IndexType *dia_indices, const ValueTypeA *values, ValueTypeB *x, const ValueTypeB *b) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; ValueTypeB Atemp[BSIZE][BSIZE]; ValueTypeB xtemp[BSIZE]; ValueTypeB btemp[BSIZE]; int offset; if (tid == 0) { #pragma unroll for (int m = 0; m < BSIZE; m++) { btemp[m] = b[m]; } offset = 0; #pragma unroll for (int m = 0; m < BSIZE; m++) #pragma unroll for (int n = 0; n < BSIZE; n++) { types::util<ValueTypeA>::to_uptype(values[offset++], Atemp[m][n]); // only one nonzero block - diagonal } gaussianElimination4by4(Atemp, xtemp, btemp); // Store x #pragma unroll for (int m = 0; m < BSIZE; m++) { x[m] = xtemp[m]; } } } // Method to perform gaussian elimination for a matrix stored in row major format template<typename IndexType, typename ValueTypeA, typename ValueTypeB> void gaussianEliminationRowMajor(ValueTypeA **e, ValueTypeB *x, ValueTypeB *b, const IndexType bsize) { if (bsize > 4) { FatalError("Warning, Gaussian Elimination code doesn't pivot and bsize > 4", AMGX_ERR_BAD_PARAMETERS); } ValueTypeB pivot, ratio, tmp; for (int j = 0; j < bsize; j++) { types::util<ValueTypeA>::to_uptype(e[j][j], pivot); for (int k = j + 1; k < bsize; k++) { ValueTypeB temp; types::util<ValueTypeA>::to_uptype(e[k][j], temp); ratio = temp / pivot; for (int m = j + 1; m < bsize; m++) { e[k][m] = e[k][m] - e[j][m] * ratio; } b[k] = b[k] - b[j] * ratio; } } // back substitution for (int j = bsize - 1; j >= 0; j--) { tmp = 
types::util<ValueTypeB>::get_zero(); for (int k = j + 1; k < bsize; k++) { tmp = tmp + e[j][k] * x[k]; } x[j] = (b[j] - tmp) / e[j][j]; } } // Method to perform gaussian elimination on matrix stored in block_dia_csr_matrix_h template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination_4x4_host(const Matrix_h &A, Vector_h &x, const Vector_h &b) { typedef typename Matrix_h::value_type ValueTypeA; typedef typename Vector_h::value_type ValueTypeB; if (A.get_num_rows() != 1) { FatalError("Haven't implemented gaussian elimination solver for num_blocks != 1", AMGX_ERR_BAD_PARAMETERS); } else { int bsize = A.get_block_dimy(); // Allocate space for block_matrix ValueTypeA **Atemp = new ValueTypeA* [bsize]; for ( int i = 0; i < bsize; i++) { Atemp[i] = new ValueTypeA[bsize]; } ValueTypeB *btemp = new ValueTypeB[bsize]; ValueTypeB *xtemp = new ValueTypeB[bsize]; // Copy matrix and rhs for (int m = 0; m < bsize; m++) { for (int n = 0; n < bsize; n++) { Atemp[m][n] = A.values[bsize * m + n]; //diag[bsize*m + n]; since we have 1 nonzero only } btemp[m] = b[m]; } ValueTypeB *x_ptr = x.raw(); gaussianEliminationRowMajor(Atemp, x_ptr, btemp, bsize); } } // Method to perform gaussian elimination on matrix stored in block_dia_csr_matrix_d template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination_4x4_device(const Matrix_d &A, Vector_d &x, const Vector_d &b) { typedef typename Matrix_d::value_type ValueTypeA; typedef typename Vector_d::value_type ValueTypeB; typedef typename Matrix_d::index_type IndexType; if (A.get_num_rows() != 1) { FatalError("Haven't implemented gaussian elimination solver for num_blocks != 1 or block_size != 4", AMGX_ERR_BAD_PARAMETERS); } else { const IndexType *dia_ptr = 
A.diag.raw(); const ValueTypeA *values_ptr = A.values.raw(); const ValueTypeB *b_ptr = b.raw(); ValueTypeB *x_ptr = x.raw(); gaussianElimination4by4Kernel <<< 1, 1>>>(dia_ptr, values_ptr, x_ptr, b_ptr); cudaCheckError(); } } template <class TConfig> void GaussianElimination<TConfig>::gaussianElimination(const Matrix<TConfig> &A, Vector<TConfig> &x, const Vector<TConfig> &b) { FatalError("Matrix format not supported in gaussian_elimination", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination(const Matrix_h &A, Vector_h &x, const Vector_h &b) { if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { gaussianElimination_4x4_host(A, x, b); } else { FatalError("gaussElimination: Blocksize is unsupported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GaussianElimination<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gaussianElimination(const Matrix_d &A, Vector_d &x, const Vector_d &b) { if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4) { gaussianElimination_4x4_device(A, x, b); } else { FatalError("gaussElimination: Blocksize is unsupported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } // ------------------------------- // Explict instantiations // ------------------------------- #define AMGX_CASE_LINE(CASE) template class GaussianElimination<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace amgx
9f8218864224f676d583aa179d886031a4bd0452.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #if defined _WIN32 || defined __APPLE__ #else #define _LINUX #endif #if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS) #include "paddle/fluid/framework/data_feed.h" namespace paddle { namespace framework { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // CUDA: use 512 threads per block const int CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. 
inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } // fill slot values __global__ void FillSlotValueOffsetKernel( const int ins_num, const int used_slot_num, size_t *slot_value_offsets, const int *uint64_offsets, const int uint64_slot_size, const int *float_offsets, const int float_slot_size, const UsedSlotGpuType *used_slots) { int col_num = ins_num + 1; int uint64_cols = uint64_slot_size + 1; int float_cols = float_slot_size + 1; CUDA_KERNEL_LOOP(slot_idx, used_slot_num) { int value_off = slot_idx * col_num; slot_value_offsets[value_off] = 0; auto &info = used_slots[slot_idx]; if (info.is_uint64_value) { for (int k = 0; k < ins_num; ++k) { int pos = k * uint64_cols + info.slot_value_idx; int num = uint64_offsets[pos + 1] - uint64_offsets[pos]; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); slot_value_offsets[value_off + k + 1] = slot_value_offsets[value_off + k] + num; } } else { for (int k = 0; k < ins_num; ++k) { int pos = k * float_cols + info.slot_value_idx; int num = float_offsets[pos + 1] - float_offsets[pos]; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); slot_value_offsets[value_off + k + 1] = slot_value_offsets[value_off + k] + num; } } } } void SlotRecordInMemoryDataFeed::FillSlotValueOffset( const int ins_num, const int used_slot_num, size_t *slot_value_offsets, const int *uint64_offsets, const int uint64_slot_size, const int *float_offsets, const int float_slot_size, const UsedSlotGpuType *used_slots) { auto stream = dynamic_cast<platform::CUDADeviceContext *>( paddle::platform::DeviceContextPool::Instance().Get(this->place_)) ->stream(); hipLaunchKernelGGL(( FillSlotValueOffsetKernel), dim3(GET_BLOCKS(used_slot_num)), dim3(CUDA_NUM_THREADS), 0, stream, ins_num, used_slot_num, slot_value_offsets, uint64_offsets, uint64_slot_size, float_offsets, float_slot_size, used_slots); hipStreamSynchronize(stream); } __global__ void CopyForTensorKernel( const int used_slot_num, 
const int ins_num, void **dest, const size_t *slot_value_offsets, const uint64_t *uint64_feas, const int *uint64_offsets, const int *uint64_ins_lens, const int uint64_slot_size, const float *float_feas, const int *float_offsets, const int *float_ins_lens, const int float_slot_size, const UsedSlotGpuType *used_slots) { int col_num = ins_num + 1; int uint64_cols = uint64_slot_size + 1; int float_cols = float_slot_size + 1; CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) { int slot_idx = i / ins_num; int ins_idx = i % ins_num; uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx]; auto &info = used_slots[slot_idx]; if (info.is_uint64_value) { uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]); int index = info.slot_value_idx + uint64_cols * ins_idx; int old_off = uint64_offsets[index]; int num = uint64_offsets[index + 1] - old_off; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); int uint64_value_offset = uint64_ins_lens[ins_idx]; for (int k = 0; k < num; ++k) { up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset]; } } else { float *fp = reinterpret_cast<float *>(dest[slot_idx]); int index = info.slot_value_idx + float_cols * ins_idx; int old_off = float_offsets[index]; int num = float_offsets[index + 1] - old_off; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); int float_value_offset = float_ins_lens[ins_idx]; for (int k = 0; k < num; ++k) { fp[k + value_offset] = float_feas[k + old_off + float_value_offset]; } } } } void SlotRecordInMemoryDataFeed::CopyForTensor( const int ins_num, const int used_slot_num, void **dest, const size_t *slot_value_offsets, const uint64_t *uint64_feas, const int *uint64_offsets, const int *uint64_ins_lens, const int uint64_slot_size, const float *float_feas, const int *float_offsets, const int *float_ins_lens, const int float_slot_size, const UsedSlotGpuType *used_slots) { auto stream = dynamic_cast<platform::CUDADeviceContext *>( 
paddle::platform::DeviceContextPool::Instance().Get(this->place_)) ->stream(); hipLaunchKernelGGL(( CopyForTensorKernel), dim3(GET_BLOCKS(used_slot_num * ins_num)), dim3(CUDA_NUM_THREADS), 0, stream, used_slot_num, ins_num, dest, slot_value_offsets, uint64_feas, uint64_offsets, uint64_ins_lens, uint64_slot_size, float_feas, float_offsets, float_ins_lens, float_slot_size, used_slots); hipStreamSynchronize(stream); } } // namespace framework } // namespace paddle #endif
9f8218864224f676d583aa179d886031a4bd0452.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #if defined _WIN32 || defined __APPLE__ #else #define _LINUX #endif #if defined(PADDLE_WITH_CUDA) && defined(PADDLE_WITH_HETERPS) #include "paddle/fluid/framework/data_feed.h" namespace paddle { namespace framework { #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) // CUDA: use 512 threads per block const int CUDA_NUM_THREADS = 512; // CUDA: number of blocks for threads. 
inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } // fill slot values __global__ void FillSlotValueOffsetKernel( const int ins_num, const int used_slot_num, size_t *slot_value_offsets, const int *uint64_offsets, const int uint64_slot_size, const int *float_offsets, const int float_slot_size, const UsedSlotGpuType *used_slots) { int col_num = ins_num + 1; int uint64_cols = uint64_slot_size + 1; int float_cols = float_slot_size + 1; CUDA_KERNEL_LOOP(slot_idx, used_slot_num) { int value_off = slot_idx * col_num; slot_value_offsets[value_off] = 0; auto &info = used_slots[slot_idx]; if (info.is_uint64_value) { for (int k = 0; k < ins_num; ++k) { int pos = k * uint64_cols + info.slot_value_idx; int num = uint64_offsets[pos + 1] - uint64_offsets[pos]; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); slot_value_offsets[value_off + k + 1] = slot_value_offsets[value_off + k] + num; } } else { for (int k = 0; k < ins_num; ++k) { int pos = k * float_cols + info.slot_value_idx; int num = float_offsets[pos + 1] - float_offsets[pos]; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); slot_value_offsets[value_off + k + 1] = slot_value_offsets[value_off + k] + num; } } } } void SlotRecordInMemoryDataFeed::FillSlotValueOffset( const int ins_num, const int used_slot_num, size_t *slot_value_offsets, const int *uint64_offsets, const int uint64_slot_size, const int *float_offsets, const int float_slot_size, const UsedSlotGpuType *used_slots) { auto stream = dynamic_cast<platform::CUDADeviceContext *>( paddle::platform::DeviceContextPool::Instance().Get(this->place_)) ->stream(); FillSlotValueOffsetKernel<<<GET_BLOCKS(used_slot_num), CUDA_NUM_THREADS, 0, stream>>>( ins_num, used_slot_num, slot_value_offsets, uint64_offsets, uint64_slot_size, float_offsets, float_slot_size, used_slots); cudaStreamSynchronize(stream); } __global__ void CopyForTensorKernel( const int used_slot_num, const int ins_num, void **dest, 
const size_t *slot_value_offsets, const uint64_t *uint64_feas, const int *uint64_offsets, const int *uint64_ins_lens, const int uint64_slot_size, const float *float_feas, const int *float_offsets, const int *float_ins_lens, const int float_slot_size, const UsedSlotGpuType *used_slots) { int col_num = ins_num + 1; int uint64_cols = uint64_slot_size + 1; int float_cols = float_slot_size + 1; CUDA_KERNEL_LOOP(i, ins_num * used_slot_num) { int slot_idx = i / ins_num; int ins_idx = i % ins_num; uint32_t value_offset = slot_value_offsets[slot_idx * col_num + ins_idx]; auto &info = used_slots[slot_idx]; if (info.is_uint64_value) { uint64_t *up = reinterpret_cast<uint64_t *>(dest[slot_idx]); int index = info.slot_value_idx + uint64_cols * ins_idx; int old_off = uint64_offsets[index]; int num = uint64_offsets[index + 1] - old_off; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); int uint64_value_offset = uint64_ins_lens[ins_idx]; for (int k = 0; k < num; ++k) { up[k + value_offset] = uint64_feas[k + old_off + uint64_value_offset]; } } else { float *fp = reinterpret_cast<float *>(dest[slot_idx]); int index = info.slot_value_idx + float_cols * ins_idx; int old_off = float_offsets[index]; int num = float_offsets[index + 1] - old_off; PADDLE_ENFORCE(num >= 0, "The number of slot size must be ge 0."); int float_value_offset = float_ins_lens[ins_idx]; for (int k = 0; k < num; ++k) { fp[k + value_offset] = float_feas[k + old_off + float_value_offset]; } } } } void SlotRecordInMemoryDataFeed::CopyForTensor( const int ins_num, const int used_slot_num, void **dest, const size_t *slot_value_offsets, const uint64_t *uint64_feas, const int *uint64_offsets, const int *uint64_ins_lens, const int uint64_slot_size, const float *float_feas, const int *float_offsets, const int *float_ins_lens, const int float_slot_size, const UsedSlotGpuType *used_slots) { auto stream = dynamic_cast<platform::CUDADeviceContext *>( 
paddle::platform::DeviceContextPool::Instance().Get(this->place_)) ->stream(); CopyForTensorKernel<<<GET_BLOCKS(used_slot_num * ins_num), CUDA_NUM_THREADS, 0, stream>>>( used_slot_num, ins_num, dest, slot_value_offsets, uint64_feas, uint64_offsets, uint64_ins_lens, uint64_slot_size, float_feas, float_offsets, float_ins_lens, float_slot_size, used_slots); cudaStreamSynchronize(stream); } } // namespace framework } // namespace paddle #endif
7e0683c908e35e186458508811d4d2fd71ce3ff7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// kernel code : this gets executed on the GPU /// e = fEpsilon /// f = fEpsilon*fEpsilon __global__ void mykernel ( float* pDataInD_Raw, float* pDataInD_Index, uint4* pDataInD_State, unsigned int* pDataOutD_Atom, ulong2* pDataOutD_List, const float e, const float f, const int iElementOffsetInPoints, const int iViewDataOffsetInPoints) { //~ extern __shared__ float3 shared[]; __shared__ float3 shared[kThreadBlockSize]; int* pfSharedEndMark = (int*)shared; float* pfSharedReadCache = (float*)(&pfSharedEndMark[1]); // thread runtime environment, 1D parametrization unsigned int iMyResultIndex; unsigned int iMyLocalDataIndex; #if GRIDHEIGHT > 1 #define BLOCKIDX (blockIdx.x * GRIDHEIGHT + blockIdx.y) #else #define BLOCKIDX (blockIdx.x) #endif const int tid_global = (BLOCKIDX * blockDim.x) + threadIdx.x; // gridDim.x (global refers to thread-grid here...) // read coordinates relevant for index calc (so thread-wrap bounds can be calculated) int iMyLocalID = iElementOffsetInPoints + tid_global; // id local within this datablock, add iViewDataOffsetInPoints to get global shared[threadIdx.x].x = pDataInD_Raw[iMyLocalID*D + 0]; shared[threadIdx.x].y = pDataInD_Raw[iMyLocalID*D + 1]; shared[threadIdx.x].z = pDataInD_Raw[iMyLocalID*D + 2]; // init blocker to zero if (threadIdx.x == 0) pfSharedEndMark[0] = 0; // sync all threads in warp __syncthreads(); // calculate the bounds for the wrap float3 vMin = shared[0]; float3 vMax = vMin; float3 vCur; {for (int d=1;d<kThreadBlockSize;++d) { // compiler should loop-unroll vCur = shared[d]; vMin.x = min(vMin.x,vCur.x); vMin.y = min(vMin.y,vCur.y); vMin.z = min(vMin.z,vCur.z); vMax.x = max(vMax.x,vCur.x); vMax.y = max(vMax.y,vCur.y); vMax.z = max(vMax.z,vCur.z); }} // add epsilon to the edges of the bounds vMin.x = vMin.x - e; vMin.y = vMin.y - e; vMin.z = vMin.z - e; vMax.x = vMax.x + e; vMax.y = vMax.y + e; vMax.z = vMax.z + e; // read remaining own 
coordinates float element[D]; element[0] = shared[threadIdx.x].x; element[1] = shared[threadIdx.x].y; element[2] = shared[threadIdx.x].z; {for (int d=3;d<D;++d) element[d] = pDataInD_Raw[iMyLocalID*D + d];} // compiler should loop-unroll // read state (todo : state is equal for the whole wrap) uint4 mystate = pDataInD_State[BLOCKIDX]; int x = mystate.x; int y = mystate.y; int z = mystate.z; int w = mystate.w; float fSqDist,a; #define K_I_0(a) (pDataInD_Index[INDEXPOS_0( ((int)x)+(a))]) #define K_I_1(a) (pDataInD_Index[INDEXPOS_1((int)x, ((int)y)+(a))]) #define K_I_2(a) (pDataInD_Index[INDEXPOS_2((int)x, (int)y, ((int)z)+(a))]) #define K_INIT_INDEX iMyLocalDataIndex = ((int)x)*SX + ((int)y)*SY + ((int)z)*SZ - iViewDataOffsetInPoints; #define GPU_IDX1 (K_I_0(1) >= vMin.x && K_I_0(0) <= vMax.x) #define GPU_IDX2 (K_I_1(1) >= vMin.y && K_I_1(0) <= vMax.y) #define GPU_IDX3 (K_I_2(1) >= vMin.z && K_I_2(0) <= vMax.z) #ifndef ENABLE_GPU_IDX3 #undef GPU_IDX3 #define GPU_IDX3 (1) #endif //~ x = y = z = I0; // debug hack to terminate thread quickly // detect if started with finished state for (;x<I0;++x) if (GPU_IDX1) { for (;y<I0;++y) if (GPU_IDX2) { for (;z<I0;++z) if (GPU_IDX3) { K_INIT_INDEX for (;w<SZ;++w,++iMyLocalDataIndex) { // compiler should loop-unroll ? __syncthreads(); if (threadIdx.x < D) pfSharedReadCache[threadIdx.x] = pDataInD_Raw[iMyLocalDataIndex*D + threadIdx.x]; __syncthreads(); // calc square distance fSqDist = element[0] - pfSharedReadCache[0]; fSqDist *= fSqDist; #define MYADD(d) a = element[d] - pfSharedReadCache[d]; fSqDist += a*a; MYADD(1) MYADD(2) MYADD(3) MYADD(4) MYADD(5) MYADD(6) MYADD(7) MYADD(8) #undef MYADD // warning, a loop was not unrolled by the compiler here, so i had to do it manually... 
// RESULT LIST if (fSqDist < f && iMyLocalDataIndex > iMyLocalID) { iMyResultIndex = atomicInc(pDataOutD_Atom,0xffffffff); if (iMyResultIndex <= kLastValidResultIndex) { pDataOutD_List[iMyResultIndex] = make_ulong2(iMyLocalID,iMyLocalDataIndex); // result is in local ids, and must be transformed outside } else { // mark overflow in resultlist pfSharedEndMark[0] = 1; } } // wait for others //__syncthreads(); // todo : try without this ! //~ // abort if overlow in resultlist if (pfSharedEndMark[0]) { if (threadIdx.x == 0) pDataInD_State[BLOCKIDX] = make_uint4(x,y,z,w); x=kStateEndValue; y=kStateEndValue; z=kStateEndValue; w=kStateEndValue; } } w = 0; } z = 0; } y = 0; } // if we finished normally, save state as finished if (threadIdx.x == 0 && x < kStateEndValue) pDataInD_State[BLOCKIDX] = make_uint4(kStateEndValue,kStateEndValue,kStateEndValue,kStateEndValue); #undef K_I_0 #undef K_I_1 #undef K_I_2 #undef K_INIT_INDEX #undef GPU_IDX1 #undef GPU_IDX2 #undef GPU_IDX3 #undef BLOCKIDX }
7e0683c908e35e186458508811d4d2fd71ce3ff7.cu
/// kernel code : this gets executed on the GPU /// e = fEpsilon /// f = fEpsilon*fEpsilon __global__ void mykernel ( float* pDataInD_Raw, float* pDataInD_Index, uint4* pDataInD_State, unsigned int* pDataOutD_Atom, ulong2* pDataOutD_List, const float e, const float f, const int iElementOffsetInPoints, const int iViewDataOffsetInPoints) { //~ extern __shared__ float3 shared[]; __shared__ float3 shared[kThreadBlockSize]; int* pfSharedEndMark = (int*)shared; float* pfSharedReadCache = (float*)(&pfSharedEndMark[1]); // thread runtime environment, 1D parametrization unsigned int iMyResultIndex; unsigned int iMyLocalDataIndex; #if GRIDHEIGHT > 1 #define BLOCKIDX (blockIdx.x * GRIDHEIGHT + blockIdx.y) #else #define BLOCKIDX (blockIdx.x) #endif const int tid_global = (BLOCKIDX * blockDim.x) + threadIdx.x; // gridDim.x (global refers to thread-grid here...) // read coordinates relevant for index calc (so thread-wrap bounds can be calculated) int iMyLocalID = iElementOffsetInPoints + tid_global; // id local within this datablock, add iViewDataOffsetInPoints to get global shared[threadIdx.x].x = pDataInD_Raw[iMyLocalID*D + 0]; shared[threadIdx.x].y = pDataInD_Raw[iMyLocalID*D + 1]; shared[threadIdx.x].z = pDataInD_Raw[iMyLocalID*D + 2]; // init blocker to zero if (threadIdx.x == 0) pfSharedEndMark[0] = 0; // sync all threads in warp __syncthreads(); // calculate the bounds for the wrap float3 vMin = shared[0]; float3 vMax = vMin; float3 vCur; {for (int d=1;d<kThreadBlockSize;++d) { // compiler should loop-unroll vCur = shared[d]; vMin.x = min(vMin.x,vCur.x); vMin.y = min(vMin.y,vCur.y); vMin.z = min(vMin.z,vCur.z); vMax.x = max(vMax.x,vCur.x); vMax.y = max(vMax.y,vCur.y); vMax.z = max(vMax.z,vCur.z); }} // add epsilon to the edges of the bounds vMin.x = vMin.x - e; vMin.y = vMin.y - e; vMin.z = vMin.z - e; vMax.x = vMax.x + e; vMax.y = vMax.y + e; vMax.z = vMax.z + e; // read remaining own coordinates float element[D]; element[0] = shared[threadIdx.x].x; element[1] = 
shared[threadIdx.x].y; element[2] = shared[threadIdx.x].z; {for (int d=3;d<D;++d) element[d] = pDataInD_Raw[iMyLocalID*D + d];} // compiler should loop-unroll // read state (todo : state is equal for the whole wrap) uint4 mystate = pDataInD_State[BLOCKIDX]; int x = mystate.x; int y = mystate.y; int z = mystate.z; int w = mystate.w; float fSqDist,a; #define K_I_0(a) (pDataInD_Index[INDEXPOS_0( ((int)x)+(a))]) #define K_I_1(a) (pDataInD_Index[INDEXPOS_1((int)x, ((int)y)+(a))]) #define K_I_2(a) (pDataInD_Index[INDEXPOS_2((int)x, (int)y, ((int)z)+(a))]) #define K_INIT_INDEX iMyLocalDataIndex = ((int)x)*SX + ((int)y)*SY + ((int)z)*SZ - iViewDataOffsetInPoints; #define GPU_IDX1 (K_I_0(1) >= vMin.x && K_I_0(0) <= vMax.x) #define GPU_IDX2 (K_I_1(1) >= vMin.y && K_I_1(0) <= vMax.y) #define GPU_IDX3 (K_I_2(1) >= vMin.z && K_I_2(0) <= vMax.z) #ifndef ENABLE_GPU_IDX3 #undef GPU_IDX3 #define GPU_IDX3 (1) #endif //~ x = y = z = I0; // debug hack to terminate thread quickly // detect if started with finished state for (;x<I0;++x) if (GPU_IDX1) { for (;y<I0;++y) if (GPU_IDX2) { for (;z<I0;++z) if (GPU_IDX3) { K_INIT_INDEX for (;w<SZ;++w,++iMyLocalDataIndex) { // compiler should loop-unroll ? __syncthreads(); if (threadIdx.x < D) pfSharedReadCache[threadIdx.x] = pDataInD_Raw[iMyLocalDataIndex*D + threadIdx.x]; __syncthreads(); // calc square distance fSqDist = element[0] - pfSharedReadCache[0]; fSqDist *= fSqDist; #define MYADD(d) a = element[d] - pfSharedReadCache[d]; fSqDist += a*a; MYADD(1) MYADD(2) MYADD(3) MYADD(4) MYADD(5) MYADD(6) MYADD(7) MYADD(8) #undef MYADD // warning, a loop was not unrolled by the compiler here, so i had to do it manually... 
// RESULT LIST if (fSqDist < f && iMyLocalDataIndex > iMyLocalID) { iMyResultIndex = atomicInc(pDataOutD_Atom,0xffffffff); if (iMyResultIndex <= kLastValidResultIndex) { pDataOutD_List[iMyResultIndex] = make_ulong2(iMyLocalID,iMyLocalDataIndex); // result is in local ids, and must be transformed outside } else { // mark overflow in resultlist pfSharedEndMark[0] = 1; } } // wait for others //__syncthreads(); // todo : try without this ! //~ // abort if overlow in resultlist if (pfSharedEndMark[0]) { if (threadIdx.x == 0) pDataInD_State[BLOCKIDX] = make_uint4(x,y,z,w); x=kStateEndValue; y=kStateEndValue; z=kStateEndValue; w=kStateEndValue; } } w = 0; } z = 0; } y = 0; } // if we finished normally, save state as finished if (threadIdx.x == 0 && x < kStateEndValue) pDataInD_State[BLOCKIDX] = make_uint4(kStateEndValue,kStateEndValue,kStateEndValue,kStateEndValue); #undef K_I_0 #undef K_I_1 #undef K_I_2 #undef K_INIT_INDEX #undef GPU_IDX1 #undef GPU_IDX2 #undef GPU_IDX3 #undef BLOCKIDX }
a1662913aae4cecf59d7892afef7c97aed4fa370.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> __device__ float devData; __global__ void checkGlobalVariable() { printf("Device: The value of global variable is %f\n", devData); devData += 2.0; } int main() { float value = 3.14f; hipMemcpyToSymbol(&devData, &value, sizeof(value)); printf("Host: copied %f to the global variable\n", value); hipLaunchKernelGGL(( checkGlobalVariable), dim3(1), dim3(1) , 0, 0, ); hipMemcpyFromSymbol(&value, &devData, sizeof(value)); printf("Host: Value changed by kernel to %f\n", value); hipDeviceReset(); system("Pause"); return 0; }
a1662913aae4cecf59d7892afef7c97aed4fa370.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> __device__ float devData; __global__ void checkGlobalVariable() { printf("Device: The value of global variable is %f\n", devData); devData += 2.0; } int main() { float value = 3.14f; cudaMemcpyToSymbol(&devData, &value, sizeof(value)); printf("Host: copied %f to the global variable\n", value); checkGlobalVariable<<<1, 1 >>> (); cudaMemcpyFromSymbol(&value, &devData, sizeof(value)); printf("Host: Value changed by kernel to %f\n", value); cudaDeviceReset(); system("Pause"); return 0; }
7d8f8a57e3f02dd67f33c08536d222fd34c77e1a.hip
// !!! This is a file automatically generated by hipify!!! #include "BvttFrontLooseKernels.cuh" #include <hip/hip_runtime.h> #include "utility\CudaDeviceUtils.h" #include "setting\CDBenchmarkSettings.h" #include "collision\lbvh\BvhExtNode.h" #include "collision\lbvh\BvhIntNode.h" #include "collision\auxiliary\FlOrderLog.h" namespace mn { /// maintain intra fronts __global__ void maintainIntLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); int t; #if MACRO_VERSION const int3 ids = _prims.getVids(cp.x); #endif if (!_tks.overlaps(st, bv)) { ///< prune for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { ///< sprout t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if ( #if MACRO_VERSION !covertex(ids, _prims.getVids(idx)) && #endif _lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(idx)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } /// ext fronts 
__global__ void maintainExtLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _prims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); #if MACRO_VERSION if (!covertex(_prims.getVids(cp.x), _prims.getVids(st))) #endif _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(st)); } } __global__ void sproutIntLooseIntraFrontsWithLog(BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); if (!_tks.overlaps(st, bv)) { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); #if MACRO_VERSION const int3 ids = _prims.getVids(cp.x); #endif do { int 
t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if ( #if MACRO_VERSION !covertex(ids, _prims.getVids(idx)) && #endif _lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(idx)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } __global__ void sproutExtLooseIntraFrontsWithLog(BvhExtNodeCompletePort _lvs, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto& _prims = _lvs.primPort(); int2 cp = _extFront[idx]; atomicAdd(&_log.extcnt(cp.y), 1); if ( #if MACRO_VERSION !covertex(_prims.getVids(cp.x), _prims.getVids(cp.y)) && #endif _lvs.overlaps(cp.x, cp.y)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(cp.y)); } } __global__ void pruneIntLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); int t; /// assume not colliding for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } __global__ void pruneExtLooseIntraFrontsWithLog(const 
BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _prims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); } } /// maintain inter fronts __global__ void maintainIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); int t; if (!_tks.overlaps(st, bv)) { for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { t = st & 1; st >>= 1; if (!t) for (t = 
_lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if (_lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(idx), _travPrims.getidx(cp.x)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } /// ext fronts __global__ void maintainExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _travPrims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[idx = st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(st), _travPrims.getidx(cp.x)); } } __global__ void sproutIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, 
FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); if (!_tks.overlaps(st, bv)) { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { int t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if (_lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(idx), _travPrims.getidx(cp.x)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } __global__ void sproutExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, BvhExtNodeCompletePort _lvs, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto& _prims = _lvs.primPort(); int2 cp = _extFront[idx]; atomicAdd(&_log.extcnt(cp.y), 1); if (_lvs.overlaps(cp.x, cp.y)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.y), _travPrims.getidx(cp.x)); } } __global__ void pruneIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const 
auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); int t; /// certainly not colliding for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } /// ext fronts __global__ void pruneExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _travPrims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[idx = st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); } } }
7d8f8a57e3f02dd67f33c08536d222fd34c77e1a.cu
#include "BvttFrontLooseKernels.cuh" #include <cuda_runtime.h> #include "utility\CudaDeviceUtils.h" #include "setting\CDBenchmarkSettings.h" #include "collision\lbvh\BvhExtNode.h" #include "collision\lbvh\BvhIntNode.h" #include "collision\auxiliary\FlOrderLog.h" namespace mn { /// maintain intra fronts __global__ void maintainIntLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); int t; #if MACRO_VERSION const int3 ids = _prims.getVids(cp.x); #endif if (!_tks.overlaps(st, bv)) { ///< prune for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { ///< sprout t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if ( #if MACRO_VERSION !covertex(ids, _prims.getVids(idx)) && #endif _lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(idx)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } /// ext fronts __global__ void maintainExtLooseIntraFrontsWithLog(const 
BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _prims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); #if MACRO_VERSION if (!covertex(_prims.getVids(cp.x), _prims.getVids(st))) #endif _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(st)); } } __global__ void sproutIntLooseIntraFrontsWithLog(BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); if (!_tks.overlaps(st, bv)) { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); #if MACRO_VERSION const int3 ids = _prims.getVids(cp.x); #endif do { int t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = 
_tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if ( #if MACRO_VERSION !covertex(ids, _prims.getVids(idx)) && #endif _lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(idx)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } __global__ void sproutExtLooseIntraFrontsWithLog(BvhExtNodeCompletePort _lvs, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto& _prims = _lvs.primPort(); int2 cp = _extFront[idx]; atomicAdd(&_log.extcnt(cp.y), 1); if ( #if MACRO_VERSION !covertex(_prims.getVids(cp.x), _prims.getVids(cp.y)) && #endif _lvs.overlaps(cp.x, cp.y)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.x), _prims.getidx(cp.y)); } } __global__ void pruneIntLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _prims.getBV(cp.x); int t; /// assume not colliding for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } __global__ void pruneExtLooseIntraFrontsWithLog(const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort 
_tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _prims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); } } /// maintain inter fronts __global__ void maintainIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); int t; if (!_tks.overlaps(st, bv)) { for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && 
_tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if (_lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(idx), _travPrims.getidx(cp.x)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } /// ext fronts __global__ void maintainExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _travPrims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[idx = st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(st), _travPrims.getidx(cp.x)); } } __global__ void sproutIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 
**_slideFtLists, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = _intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); if (!_tks.overlaps(st, bv)) { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); return; } cp.y = _lvs.getlca(_tks.getrangey(st) + 1); st = (_tks.getlc(st) << 1) | (_tks.getmark(st) & 1); do { int t = st & 1; st >>= 1; if (!t) for (t = _lvs.getpar(idx = _tks.getrangex(st)); st <= t && _tks.overlaps(st, bv); st++); else t = st - 1, idx = st; if (st > t) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = make_int2(cp.x, idx); atomicAdd(&_log.extcnt(idx), 1); if (_lvs.overlaps(idx, bv)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(idx), _travPrims.getidx(cp.x)); } st = _lvs.getlca(idx + 1); } else { _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st); atomicAdd(&_log.intcnt(st), 1); st = _lvs.getlca(_tks.getrangey(st) + 1); } } while (st != cp.y); } __global__ void sproutExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, BvhExtNodeCompletePort _lvs, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, int *_cpNum, int2 *_cpRes) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto& _prims = _lvs.primPort(); int2 cp = _extFront[idx]; atomicAdd(&_log.extcnt(cp.y), 1); if (_lvs.overlaps(cp.x, cp.y)) { _cpRes[atomicAdd(_cpNum, 1)] = make_int2(_prims.getidx(cp.y), _travPrims.getidx(cp.x)); } } __global__ void pruneIntLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_intFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); int2 cp = 
_intFront[idx]; int st = cp.y; const BOX bv = _travPrims.getBV(cp.x); int t; /// certainly not colliding for (t = _lvs.getlca(_tks.getrangex(st)) >> 1, st--; st >= t && !_tks.overlaps(st, bv); st--); if (st < t && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); return; } /// ext fronts __global__ void pruneExtLooseInterFrontsWithLog(const BvhPrimitiveCompletePort _travPrims, const BvhExtNodeCompletePort _lvs, const BvhIntNodeCompletePort _tks, uint ftSize, const int2 *_extFront, FlOrderCompletePort _log, uint *_ftSlideSizes, int2 **_slideFtLists) { uint idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= ftSize) return; const auto _prims = _lvs.getPrimPort(); const int2 cp = _extFront[idx]; int st = cp.y, gfa; const BOX bv = _travPrims.getBV(cp.x); if (!_lvs.overlaps(st, bv)) { gfa = _lvs.getpar(st); if (_tks.overlaps(gfa, bv)) { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); return; } if ((_lvs.getmark(idx = st) & 4) == 4) ///< or _lca[idx = st] & 1 return; for (st = gfa - 1, gfa = _lvs.getlca(idx) >> 1; st >= gfa && !_tks.overlaps(st, bv); st--); if (st < gfa && (st + 1 > 0 && !_tks.overlaps(_tks.getpar(st + 1), bv))) return; _slideFtLists[0][atomicAdd(_ftSlideSizes, 1)] = make_int2(cp.x, st + 1); atomicAdd(&_log.intcnt(st + 1), 1); } else { _slideFtLists[1][atomicAdd(_ftSlideSizes + 1, 1)] = cp; atomicAdd(&_log.extcnt(st), 1); } } }
ac42be4af0ebe8823dfd02750f5c8f4f4eb360a9.hip
// !!! This is a file automatically generated by hipify!!! // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args #include <iostream> #include <algorithm> // CHECK: #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> template<typename T> __global__ void axpy(T a, T *x, T *y) { y[threadIdx.x] = a * x[threadIdx.x]; } template<typename T1, typename T2> __global__ void axpy_2(T1 a, T2 *x, T2 *y) { y[threadIdx.x] = a * x[threadIdx.x]; } template<typename T> __global__ void axpy_empty() { } __global__ void empty() { } __global__ void nonempty(int x, int y, int z) { } int main(int argc, char* argv[]) { const int kDataLen = 4; float a = 2.0f; float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f}; float host_y[kDataLen]; // Copy input data to device. float* device_x; float* device_y; // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float)); hipMalloc(&device_x, kDataLen * sizeof(float)); // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float)); hipMalloc(&device_y, kDataLen * sizeof(float)); // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice); int x = 1, y = 2, z = 3; size_t N = 32; // CHECK: hipStream_t stream = NULL; hipStream_t stream = NULL; // CHECK: hipStreamCreate(&stream); hipStreamCreate(&stream); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(kDataLen), 0, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(dim3(kDataLen)), 0, 0, a, device_x, device_y); // CHECK: 
hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), 0, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(kDataLen), N, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(dim3(kDataLen)), N, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), N, 0, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(kDataLen), N, stream, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(1), dim3(dim3(kDataLen)), N, stream, a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); hipLaunchKernelGGL(( axpy<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), N, stream, a, device_x, device_y); double h_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f}; double h_y[kDataLen]; // 
Copy input data to device. double* d_x; double* d_y; // CHECK: hipMalloc(&d_x, kDataLen * sizeof(double)); hipMalloc(&d_x, kDataLen * sizeof(double)); // CHECK: hipMalloc(&d_y, kDataLen * sizeof(double)); hipMalloc(&d_y, kDataLen * sizeof(double)); // CHECK: hipMemcpy(d_x, h_x, kDataLen * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_x, h_x, kDataLen * sizeof(double), hipMemcpyHostToDevice); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1,1,1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(dim3(1,1,1)), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(1), dim3(dim3(kDataLen*2+10)), N*N, stream, a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1,1,1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(dim3(1,1,1)), dim3(dim3(kDataLen*2+10)), N*N, stream, a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(dim3(1)), dim3(kDataLen), 0, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(dim3(kDataLen)), 0, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( 
axpy_empty<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), 0, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(dim3(1)), dim3(kDataLen), N, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(dim3(kDataLen)), N, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( axpy_empty<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), N, 0, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( axpy_empty<float>), dim3(dim3(1)), dim3(kDataLen), N, stream, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( axpy_empty<float>), dim3(1), dim3(dim3(kDataLen)), N, stream, ); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( axpy_empty<float>), dim3(dim3(1)), dim3(dim3(kDataLen)), N, stream, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( empty), dim3(1), dim3(kDataLen), 0, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(kDataLen), 0, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); hipLaunchKernelGGL(( empty), dim3(1), dim3(dim3(kDataLen)), 0, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 
0); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(dim3(kDataLen)), 0, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( empty), dim3(1), dim3(kDataLen), N, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(kDataLen), N, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( empty), dim3(1), dim3(dim3(kDataLen)), N, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(dim3(kDataLen)), N, 0, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( empty), dim3(1), dim3(kDataLen), N, stream, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(kDataLen), N, stream, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( empty), dim3(1), dim3(dim3(kDataLen)), N, stream, ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); hipLaunchKernelGGL(( empty), dim3(dim3(1)), dim3(dim3(kDataLen)), N, stream, ); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(kDataLen), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(kDataLen), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(dim3(kDataLen)), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(dim3(kDataLen)), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(kDataLen), N, 0, x, y, 
z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(kDataLen), N, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(dim3(kDataLen)), N, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(dim3(kDataLen)), N, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(kDataLen), N, stream, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(kDataLen), N, stream, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(1), dim3(dim3(kDataLen)), N, stream, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(1)), dim3(dim3(kDataLen)), N, stream, x, y, z); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(::min(kDataLen*2+10,x)), ::min(x,y), stream, a, ::min(d_x,d_y), ::max(d_x,d_y)); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(dim3(x,y,z)), dim3(::min(kDataLen*2+10,x)), ::min(x,y), stream, a, ::min(d_x,d_y), ::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(::min(kDataLen*2+10,x)), ::min(x,y), 0, a, ::min(d_x,d_y), ::max(d_x,d_y)); hipLaunchKernelGGL(( axpy_2<float,double>), dim3(dim3(x,y,z)), dim3(::min(kDataLen*2+10,x)), ::min(x,y), 0, a, ::min(d_x,d_y), ::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(::min(kDataLen*2+10,x)), 0, 0, a, ::min(d_x,d_y), ::max(d_x,d_y)); hipLaunchKernelGGL(( axpy_2<float,double>), 
dim3(dim3(x,y,z)), dim3(::min(kDataLen*2+10,x)), 0, 0, a, ::min(d_x,d_y), ::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,::min(y,z)), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(x,y,z)), dim3(dim3(x,y,::min(y,z))), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,::min(::max(x,y),z)), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(x,y,z)), dim3(dim3(x,y,::min(::max(x,y),z))), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,::min(::max(x,int(N)),z)), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(x,y,z)), dim3(dim3(x,y,::min(::max(x,int(N)),z))), 0, 0, x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,::min(::max(x,int(N+N -x/y + y*1)),z)), 0, 0, x, y, z); hipLaunchKernelGGL(( nonempty), dim3(dim3(x,y,z)), dim3(dim3(x,y,::min(::max(x,int(N+N -x/y + y*1)),z))), 0, 0, x, y, z); // Copy output data to host. // CHECK: hipDeviceSynchronize(); hipDeviceSynchronize(); // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost); // Print the results. for (int i = 0; i < kDataLen; ++i) { std::cout << "y[" << i << "] = " << host_y[i] << "\n"; } // CHECK: hipDeviceReset(); hipDeviceReset(); return 0; }
ac42be4af0ebe8823dfd02750f5c8f4f4eb360a9.cu
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args #include <iostream> #include <algorithm> // CHECK: #include <hip/hip_runtime.h> #include <cuda.h> template<typename T> __global__ void axpy(T a, T *x, T *y) { y[threadIdx.x] = a * x[threadIdx.x]; } template<typename T1, typename T2> __global__ void axpy_2(T1 a, T2 *x, T2 *y) { y[threadIdx.x] = a * x[threadIdx.x]; } template<typename T> __global__ void axpy_empty() { } __global__ void empty() { } __global__ void nonempty(int x, int y, int z) { } int main(int argc, char* argv[]) { const int kDataLen = 4; float a = 2.0f; float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f}; float host_y[kDataLen]; // Copy input data to device. float* device_x; float* device_y; // CHECK: hipMalloc(&device_x, kDataLen * sizeof(float)); cudaMalloc(&device_x, kDataLen * sizeof(float)); // CHECK: hipMalloc(&device_y, kDataLen * sizeof(float)); cudaMalloc(&device_y, kDataLen * sizeof(float)); // CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice); cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice); int x = 1, y = 2, z = 3; size_t N = 32; // CHECK: hipStream_t stream = NULL; cudaStream_t stream = NULL; // CHECK: hipStreamCreate(&stream); cudaStreamCreate(&stream); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<float><<<1, kDataLen>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<float><<<dim3(1), kDataLen>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<float><<<1, dim3(kDataLen)>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y); axpy<float><<<dim3(1), dim3(kDataLen)>>>(a, device_x, device_y); // CHECK: 
hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); axpy<float><<<1, kDataLen, N>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); axpy<float><<<dim3(1), kDataLen, N>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); axpy<float><<<1, dim3(kDataLen), N>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, 0, a, device_x, device_y); axpy<float><<<dim3(1), dim3(kDataLen), N>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); axpy<float><<<1, kDataLen, N, stream>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); axpy<float><<<dim3(1), kDataLen, N, stream>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); axpy<float><<<1, dim3(kDataLen), N, stream>>>(a, device_x, device_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy<float>), dim3(1), dim3(kDataLen), N, stream, a, device_x, device_y); axpy<float><<<dim3(1), dim3(kDataLen), N, stream>>>(a, device_x, device_y); double h_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f}; double h_y[kDataLen]; // Copy input data to device. 
double* d_x; double* d_y; // CHECK: hipMalloc(&d_x, kDataLen * sizeof(double)); cudaMalloc(&d_x, kDataLen * sizeof(double)); // CHECK: hipMalloc(&d_y, kDataLen * sizeof(double)); cudaMalloc(&d_y, kDataLen * sizeof(double)); // CHECK: hipMemcpy(d_x, h_x, kDataLen * sizeof(double), hipMemcpyHostToDevice); cudaMemcpy(d_x, h_x, kDataLen * sizeof(double), cudaMemcpyHostToDevice); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); axpy_2<float,double><<<1, kDataLen*2+10, N*N, stream>>>(a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1,1,1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); axpy_2<float,double><<<dim3(1,1,1), kDataLen*2+10, N*N, stream>>>(a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); axpy_2<float,double><<<1, dim3(kDataLen*2+10), N*N, stream>>>(a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(1,1,1), dim3(kDataLen*2+10), N*N, stream, a, d_x, d_y); axpy_2<float,double><<<dim3(1,1,1), dim3(kDataLen*2+10), N*N, stream>>>(a, d_x, d_y); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); axpy_empty<float><<<1, kDataLen>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); axpy_empty<float><<<dim3(1), kDataLen>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); axpy_empty<float><<<1, dim3(kDataLen)>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), 0, 0); axpy_empty<float><<<dim3(1), dim3(kDataLen)>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); axpy_empty<float><<<1, kDataLen, N>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); 
axpy_empty<float><<<dim3(1), kDataLen, N>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); axpy_empty<float><<<1, dim3(kDataLen), N>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, 0); axpy_empty<float><<<dim3(1), dim3(kDataLen), N>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); axpy_empty<float><<<1, kDataLen, N, stream>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); axpy_empty<float><<<dim3(1), kDataLen, N, stream>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); axpy_empty<float><<<1, dim3(kDataLen), N, stream>>>(); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_empty<float>), dim3(1), dim3(kDataLen), N, stream); axpy_empty<float><<<dim3(1), dim3(kDataLen), N, stream>>>(); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); empty<<<1, kDataLen>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); empty<<<dim3(1), kDataLen>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); empty<<<1, dim3(kDataLen)>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), 0, 0); empty<<<dim3(1), dim3(kDataLen)>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); empty<<<1, kDataLen, N>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); empty<<<dim3(1), kDataLen, N>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); empty<<<1, dim3(kDataLen), N>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, 0); empty<<<dim3(1), dim3(kDataLen), N>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); empty<<<1, kDataLen, N, stream>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); empty<<<dim3(1), 
kDataLen, N, stream>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); empty<<<1, dim3(kDataLen), N, stream>>> ( ); // CHECK: hipLaunchKernelGGL(empty, dim3(1), dim3(kDataLen), N, stream); empty<<<dim3(1), dim3(kDataLen), N, stream>>> ( ); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); nonempty<<<1, kDataLen>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); nonempty<<<dim3(1), kDataLen>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); nonempty<<<1, dim3(kDataLen)>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), 0, 0, x, y, z); nonempty<<<dim3(1), dim3(kDataLen)>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); nonempty<<<1, kDataLen, N>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); nonempty<<<dim3(1), kDataLen, N>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); nonempty<<<1, dim3(kDataLen), N>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, 0, x, y, z); nonempty<<<dim3(1), dim3(kDataLen), N>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); nonempty<<<1, kDataLen, N, stream>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); nonempty<<<dim3(1), kDataLen, N, stream>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); nonempty<<<1, dim3(kDataLen), N, stream>>> (x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(1), dim3(kDataLen), N, stream, x, y, z); nonempty<<<dim3(1), dim3(kDataLen), N, stream>>> (x, y, z); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(std::min(kDataLen*2+10,x)), std::min(x,y), stream, a, std::min(d_x,d_y), 
std::max(d_x,d_y)); axpy_2<float,double><<<dim3(x,y,z), std::min(kDataLen*2+10,x), std::min(x,y), stream>>>(a, std::min(d_x,d_y), std::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(std::min(kDataLen*2+10,x)), std::min(x,y), 0, a, std::min(d_x,d_y), std::max(d_x,d_y)); axpy_2<float,double><<<dim3(x,y,z), std::min(kDataLen*2+10,x), std::min(x,y)>>>(a, std::min(d_x,d_y), std::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(axpy_2<float,double>), dim3(x,y,z), dim3(std::min(kDataLen*2+10,x)), 0, 0, a, std::min(d_x,d_y), std::max(d_x,d_y)); axpy_2<float,double><<<dim3(x,y,z), std::min(kDataLen*2+10,x)>>>(a, std::min(d_x,d_y), std::max(d_x,d_y)); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,std::min(y,z)), 0, 0, x, y, z); nonempty<<<dim3(x,y,z), dim3(x,y,std::min(y,z))>>>(x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,std::min(std::max(x,y),z)), 0, 0, x, y, z); nonempty<<<dim3(x,y,z), dim3(x,y,std::min(std::max(x,y),z))>>>(x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,std::min(std::max(x,int(N)),z)), 0, 0, x, y, z); nonempty<<<dim3(x,y,z), dim3(x,y,std::min(std::max(x,int(N)),z))>>>(x, y, z); // CHECK: hipLaunchKernelGGL(nonempty, dim3(x,y,z), dim3(x,y,std::min(std::max(x,int(N+N -x/y + y*1)),z)), 0, 0, x, y, z); nonempty<<<dim3(x,y,z), dim3(x,y,std::min(std::max(x,int(N+N -x/y + y*1)),z))>>>(x, y, z); // Copy output data to host. // CHECK: hipDeviceSynchronize(); cudaDeviceSynchronize(); // CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost); cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost); // Print the results. for (int i = 0; i < kDataLen; ++i) { std::cout << "y[" << i << "] = " << host_y[i] << "\n"; } // CHECK: hipDeviceReset(); cudaDeviceReset(); return 0; }
9dcc2859c65a60d54e878d812c5382673c018c2a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #include <hip/hip_runtime.h> #include "softmax_common.h" #include "softmax.cuh" using namespace std; extern "C" float softmax( float *input, float *output, int size ){ dim3 blocks(1,1); dim3 threads(size, 1); hipError_t error; hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( softmax_kernel), dim3(blocks), dim3(threads), 0, 0, input, output, size ); // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } return msecTotal; }
9dcc2859c65a60d54e878d812c5382673c018c2a.cu
#include <iostream> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #include <cuda_runtime.h> #include "softmax_common.h" #include "softmax.cuh" using namespace std; extern "C" float softmax( float *input, float *output, int size ){ dim3 blocks(1,1); dim3 threads(size, 1); cudaError_t error; cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } softmax_kernel<<<blocks, threads>>>( input, output, size ); // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } return msecTotal; }
cuPow.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" /* * Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'. */ extern "C" __global__ void cuPow(int n, float *a, float b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = pow(a[i],b); } }
cuPow.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" /* * Perfom a reduction from data of length 'size' to result, where length of result will be 'number of blocks'. */ extern "C" __global__ void cuPow(int n, float *a, float b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = pow(a[i],b); } }
01dcee042ac9db87b7658de841ebf6a28478bbf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MatrixMult.h" #include "MatrixMult.cu.h" #define WIDTH_A 1024//1024 //1024//2048 #define HEIGHT_A 1024//2048//2048//2048 #define WIDTH_B 1024//1536//4096//2048 #define TILE 16 ///////////////////////////////////////////////////////// // Program main ///////////////////////////////////////////////////////// int main() { // set seed for rand() srand(2006); // 1. allocate host memory for the two matrices unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WIDTH_B * WIDTH_A; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // 2. initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // 3. allocate device memory float* d_A; float* d_B; hipMalloc((void**) &d_A, mem_size_A); hipMalloc((void**) &d_B, mem_size_B); // 4. copy host memory to device hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); // 5. allocate host memory for the result C unsigned int size_C = HEIGHT_A * WIDTH_B; unsigned int mem_size_C = sizeof(float) * size_C; float* h_C = (float*) malloc(mem_size_C); float* seq_C = (float*) malloc(mem_size_C); // 6. allocate device memory for the result float* d_C; hipMalloc((void**) &d_C, mem_size_C); // 7. compute sequential matrix multiplication { unsigned long int elapsed; struct timeval t_start, t_end, t_diff; gettimeofday(&t_start, NULL); matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B); gettimeofday(&t_end, NULL); timeval_subtract(&t_diff, &t_end, &t_start); elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec); printf("Sequential Naive version runs in: %lu microsecs\n", elapsed); } // 8. 
perform the calculation // setup execution parameters int dimy = ceil( ((float)HEIGHT_A)/TILE ); int dimx = ceil( ((float) WIDTH_B)/TILE ); dim3 block(TILE, TILE, 1); dim3 grid (dimx, dimy, 1); // execute the kernel { unsigned long int elapsed; struct timeval t_start, t_end, t_diff; gettimeofday(&t_start, NULL); //matMultKer<float> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); hipLaunchKernelGGL(( matMultTiledKer<float,TILE>) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); //matMultCacheKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); hipDeviceSynchronize(); gettimeofday(&t_end, NULL); timeval_subtract(&t_diff, &t_end, &t_start); elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec); printf("GPU version runs in: %lu microsecs\n", elapsed); float microsecPerMatrixMul = elapsed; double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f)); printf( "Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y); } // 11. copy result from device to host hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); // 12. validate validate<float>(seq_C, h_C, size_C); // 7. clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
01dcee042ac9db87b7658de841ebf6a28478bbf9.cu
#include "MatrixMult.h" #include "MatrixMult.cu.h" #define WIDTH_A 1024//1024 //1024//2048 #define HEIGHT_A 1024//2048//2048//2048 #define WIDTH_B 1024//1536//4096//2048 #define TILE 16 ///////////////////////////////////////////////////////// // Program main ///////////////////////////////////////////////////////// int main() { // set seed for rand() srand(2006); // 1. allocate host memory for the two matrices unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = WIDTH_B * WIDTH_A; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // 2. initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // 3. allocate device memory float* d_A; float* d_B; cudaMalloc((void**) &d_A, mem_size_A); cudaMalloc((void**) &d_B, mem_size_B); // 4. copy host memory to device cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); // 5. allocate host memory for the result C unsigned int size_C = HEIGHT_A * WIDTH_B; unsigned int mem_size_C = sizeof(float) * size_C; float* h_C = (float*) malloc(mem_size_C); float* seq_C = (float*) malloc(mem_size_C); // 6. allocate device memory for the result float* d_C; cudaMalloc((void**) &d_C, mem_size_C); // 7. compute sequential matrix multiplication { unsigned long int elapsed; struct timeval t_start, t_end, t_diff; gettimeofday(&t_start, NULL); matMult<float>(h_A, h_B, seq_C, WIDTH_A, HEIGHT_A, WIDTH_B); gettimeofday(&t_end, NULL); timeval_subtract(&t_diff, &t_end, &t_start); elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec); printf("Sequential Naive version runs in: %lu microsecs\n", elapsed); } // 8. 
perform the calculation // setup execution parameters int dimy = ceil( ((float)HEIGHT_A)/TILE ); int dimx = ceil( ((float) WIDTH_B)/TILE ); dim3 block(TILE, TILE, 1); dim3 grid (dimx, dimy, 1); // execute the kernel { unsigned long int elapsed; struct timeval t_start, t_end, t_diff; gettimeofday(&t_start, NULL); //matMultKer<float> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); matMultTiledKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); //matMultCacheKer<float,TILE> <<< grid, block >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, WIDTH_B); cudaThreadSynchronize(); gettimeofday(&t_end, NULL); timeval_subtract(&t_diff, &t_end, &t_start); elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec); printf("GPU version runs in: %lu microsecs\n", elapsed); float microsecPerMatrixMul = elapsed; double flopsPerMatrixMul = 2.0 * HEIGHT_A * WIDTH_B * WIDTH_A; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (microsecPerMatrixMul / (1000.0f * 1000.0f)); printf( "Performance= %.2f GFlop/s, Time= %.3f microsec %d %d\n", gigaFlops, microsecPerMatrixMul, grid.x, grid.y); } // 11. copy result from device to host cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); // 12. validate validate<float>(seq_C, h_C, size_C); // 7. clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
629b2ecce55aa7a3748c2b9ff57391ea32c979a6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "bankConflictsRead.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *outFloat = NULL; hipMalloc(&outFloat, XSIZE*YSIZE); int iStride = 2; unsigned long long *ullTime = NULL; hipMalloc(&ullTime, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( bankConflictsRead), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iStride,ullTime); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( bankConflictsRead), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iStride,ullTime); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( bankConflictsRead), dim3(gridBlock),dim3(threadBlock), 0, 0, outFloat,iStride,ullTime); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
629b2ecce55aa7a3748c2b9ff57391ea32c979a6.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bankConflictsRead.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *outFloat = NULL; cudaMalloc(&outFloat, XSIZE*YSIZE); int iStride = 2; unsigned long long *ullTime = NULL; cudaMalloc(&ullTime, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bankConflictsRead<<<gridBlock,threadBlock>>>(outFloat,iStride,ullTime); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bankConflictsRead<<<gridBlock,threadBlock>>>(outFloat,iStride,ullTime); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bankConflictsRead<<<gridBlock,threadBlock>>>(outFloat,iStride,ullTime); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
503ce1246c516c385897371194434c674f295743.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
503ce1246c516c385897371194434c674f295743.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
fcf8422e19ed7a546bdb1f9e422912adee92568b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * adi.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Will Killian <killian@udel.edu> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #define POLYBENCH_TIME 1 #include "adi.cuh" #include "../../common/polybench.h" #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 2.5 #define GPU_DEVICE 0 #define RUN_ON_CPU void adi(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(X,N,N,n,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i1 = 0; i1 < _PB_N; i1++) { for (int i2 = 1; i2 < _PB_N; i2++) { X[i1][i2] = X[i1][i2] - X[i1][(i2-1)] * A[i1][i2] / B[i1][(i2-1)]; B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[i1][(i2-1)]; } } for (int i1 = 0; i1 < _PB_N; i1++) { X[i1][(N-1)] = X[i1][(N-1)] / B[i1][(N-1)]; } for (int i1 = 0; i1 < _PB_N; i1++) { for (int i2 = 0; i2 < _PB_N-2; i2++) { X[i1][(N-i2-2)] = (X[i1][(N-2-i2)] - X[i1][(N-2-i2-1)] * A[i1][(N-i2-3)]) / B[i1][(N-3-i2)]; } } for (int i1 = 1; i1 < _PB_N; i1++) { for (int i2 = 0; i2 < _PB_N; i2++) { X[i1][i2] = X[i1][i2] - X[(i1-1)][i2] * A[i1][i2] / B[(i1-1)][i2]; B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[(i1-1)][i2]; } } for (int i2 = 0; i2 < _PB_N; i2++) { X[(N-1)][i2] = X[(N-1)][i2] / B[(N-1)][i2]; } for (int i1 = 0; i1 < _PB_N-2; i1++) { for (int i2 = 0; i2 < _PB_N; i2++) { X[(N-2-i1)][i2] = (X[(N-2-i1)][i2] - X[(N-i1-3)][i2] * A[(N-3-i1)][i2]) / B[(N-2-i1)][i2]; } } } } void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE 
POLYBENCH_2D(X,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { X[i][j] = ((DATA_TYPE) i*(j+1) + 1) / N; A[i][j] = ((DATA_TYPE) (i-1)*(j+4) + 2) / N; B[i][j] = ((DATA_TYPE) (i+3)*(j+7) + 3) / N; } } } void compareResults(int n, DATA_TYPE POLYBENCH_2D(B_cpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_fromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_cpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_fromGpu,N,N,n,n)) { int i, j, fail; fail = 0; // Compare b and x output on cpu and gpu for (i=0; i < n; i++) { for (j=0; j < n; j++) { if (percentDiff(B_cpu[i][j], B_fromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(X_cpu[i][j], X_fromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void adi_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if ((i1 < _PB_N)) { for (int i2 = 1; i2 < _PB_N; i2++) { X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)]; B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)]; } } } __global__ void adi_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if ((i1 < _PB_N)) { X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)]; } } __global__ void adi_kernel3(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 < _PB_N) { for (int i2 = 0; i2 < _PB_N-2; i2++) { X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)]; } } } __global__ void adi_kernel4(int n, 
DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < _PB_N) { X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2]; B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2]; } } __global__ void adi_kernel5(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < _PB_N) { X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2]; } } __global__ void adi_kernel6(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < _PB_N) { X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2]; } } void adiCuda(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(X,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_outputFromGpu,N,N,n,n)) { DATA_TYPE* A_gpu; DATA_TYPE* B_gpu; DATA_TYPE* X_gpu; hipMalloc(&A_gpu, N * N * sizeof(DATA_TYPE)); hipMalloc(&B_gpu, N * N * sizeof(DATA_TYPE)); hipMalloc(&X_gpu, N * N * sizeof(DATA_TYPE)); hipMemcpy(A_gpu, A, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMemcpy(X_gpu, X, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); dim3 block1(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y, 1); dim3 grid1(1, 1, 1); grid1.x = (size_t)(ceil( ((float)N) / ((float)block1.x) )); /* Start timer. 
*/ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS; t++) { hipLaunchKernelGGL(( adi_kernel1), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( adi_kernel2), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( adi_kernel3), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu); hipDeviceSynchronize(); for (int i1 = 1; i1 < _PB_N; i1++) { hipLaunchKernelGGL(( adi_kernel4), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu, i1); hipDeviceSynchronize(); } hipLaunchKernelGGL(( adi_kernel5), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu); hipDeviceSynchronize(); for (int i1 = 0; i1 < _PB_N-2; i1++) { hipLaunchKernelGGL(( adi_kernel6), dim3(grid1), dim3(block1), 0, 0, n, A_gpu, B_gpu, X_gpu, i1); hipDeviceSynchronize(); } } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; hipMemcpy(B_outputFromGpu, B_gpu, N * N * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); hipMemcpy(X_outputFromGpu, X_gpu, N * N * sizeof(DATA_TYPE), hipMemcpyDeviceToHost); hipFree(A_gpu); hipFree(B_gpu); hipFree(X_gpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. 
*/ static void print_array(int n, DATA_TYPE POLYBENCH_2D(X,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, X[i][j]); if ((i * N + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char *argv[]) { int tsteps = TSTEPS; int n = N; GPU_argv_init(); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B_outputFromGpu,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(X,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(X_outputFromGpu,DATA_TYPE,N,N,n,n); init_array(n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X)); adiCuda(tsteps, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X), POLYBENCH_ARRAY(B_outputFromGpu), POLYBENCH_ARRAY(X_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; adi(tsteps, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu), POLYBENCH_ARRAY(X), POLYBENCH_ARRAY(X_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(X_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(B_outputFromGpu); POLYBENCH_FREE_ARRAY(X); POLYBENCH_FREE_ARRAY(X_outputFromGpu); return 0; } #include "../../common/polybench.c"
fcf8422e19ed7a546bdb1f9e422912adee92568b.cu
/** * adi.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Will Killian <killian@udel.edu> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #define POLYBENCH_TIME 1 #include "adi.cuh" #include "../../common/polybench.h" #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 2.5 #define GPU_DEVICE 0 #define RUN_ON_CPU void adi(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(X,N,N,n,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i1 = 0; i1 < _PB_N; i1++) { for (int i2 = 1; i2 < _PB_N; i2++) { X[i1][i2] = X[i1][i2] - X[i1][(i2-1)] * A[i1][i2] / B[i1][(i2-1)]; B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[i1][(i2-1)]; } } for (int i1 = 0; i1 < _PB_N; i1++) { X[i1][(N-1)] = X[i1][(N-1)] / B[i1][(N-1)]; } for (int i1 = 0; i1 < _PB_N; i1++) { for (int i2 = 0; i2 < _PB_N-2; i2++) { X[i1][(N-i2-2)] = (X[i1][(N-2-i2)] - X[i1][(N-2-i2-1)] * A[i1][(N-i2-3)]) / B[i1][(N-3-i2)]; } } for (int i1 = 1; i1 < _PB_N; i1++) { for (int i2 = 0; i2 < _PB_N; i2++) { X[i1][i2] = X[i1][i2] - X[(i1-1)][i2] * A[i1][i2] / B[(i1-1)][i2]; B[i1][i2] = B[i1][i2] - A[i1][i2] * A[i1][i2] / B[(i1-1)][i2]; } } for (int i2 = 0; i2 < _PB_N; i2++) { X[(N-1)][i2] = X[(N-1)][i2] / B[(N-1)][i2]; } for (int i1 = 0; i1 < _PB_N-2; i1++) { for (int i2 = 0; i2 < _PB_N; i2++) { X[(N-2-i1)][i2] = (X[(N-2-i1)][i2] - X[(N-i1-3)][i2] * A[(N-3-i1)][i2]) / B[(N-2-i1)][i2]; } } } } void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(X,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { X[i][j] = 
((DATA_TYPE) i*(j+1) + 1) / N; A[i][j] = ((DATA_TYPE) (i-1)*(j+4) + 2) / N; B[i][j] = ((DATA_TYPE) (i+3)*(j+7) + 3) / N; } } } void compareResults(int n, DATA_TYPE POLYBENCH_2D(B_cpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_fromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_cpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_fromGpu,N,N,n,n)) { int i, j, fail; fail = 0; // Compare b and x output on cpu and gpu for (i=0; i < n; i++) { for (j=0; j < n; j++) { if (percentDiff(B_cpu[i][j], B_fromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } for (i=0; i<n; i++) { for (j=0; j<n; j++) { if (percentDiff(X_cpu[i][j], X_fromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void adi_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if ((i1 < _PB_N)) { for (int i2 = 1; i2 < _PB_N; i2++) { X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)]; B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)]; } } } __global__ void adi_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if ((i1 < _PB_N)) { X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)]; } } __global__ void adi_kernel3(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i1 = blockIdx.x * blockDim.x + threadIdx.x; if (i1 < _PB_N) { for (int i2 = 0; i2 < _PB_N-2; i2++) { X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)]; } } } __global__ void adi_kernel4(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if 
(i2 < _PB_N) { X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2]; B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2]; } } __global__ void adi_kernel5(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < _PB_N) { X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2]; } } __global__ void adi_kernel6(int n, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1) { int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < _PB_N) { X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2]; } } void adiCuda(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(X,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(X_outputFromGpu,N,N,n,n)) { DATA_TYPE* A_gpu; DATA_TYPE* B_gpu; DATA_TYPE* X_gpu; cudaMalloc(&A_gpu, N * N * sizeof(DATA_TYPE)); cudaMalloc(&B_gpu, N * N * sizeof(DATA_TYPE)); cudaMalloc(&X_gpu, N * N * sizeof(DATA_TYPE)); cudaMemcpy(A_gpu, A, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMemcpy(X_gpu, X, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); dim3 block1(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y, 1); dim3 grid1(1, 1, 1); grid1.x = (size_t)(ceil( ((float)N) / ((float)block1.x) )); /* Start timer. 
*/ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS; t++) { adi_kernel1<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu); cudaThreadSynchronize(); adi_kernel2<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu); cudaThreadSynchronize(); adi_kernel3<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu); cudaThreadSynchronize(); for (int i1 = 1; i1 < _PB_N; i1++) { adi_kernel4<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu, i1); cudaThreadSynchronize(); } adi_kernel5<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu); cudaThreadSynchronize(); for (int i1 = 0; i1 < _PB_N-2; i1++) { adi_kernel6<<<grid1, block1>>>(n, A_gpu, B_gpu, X_gpu, i1); cudaThreadSynchronize(); } } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; cudaMemcpy(B_outputFromGpu, B_gpu, N * N * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); cudaMemcpy(X_outputFromGpu, X_gpu, N * N * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(X_gpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_2D(X,N,N,n,n)) { int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, X[i][j]); if ((i * N + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char *argv[]) { int tsteps = TSTEPS; int n = N; GPU_argv_init(); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B_outputFromGpu,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(X,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(X_outputFromGpu,DATA_TYPE,N,N,n,n); init_array(n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X)); adiCuda(tsteps, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X), POLYBENCH_ARRAY(B_outputFromGpu), POLYBENCH_ARRAY(X_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. 
*/ polybench_start_instruments; adi(tsteps, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(X)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(B_outputFromGpu), POLYBENCH_ARRAY(X), POLYBENCH_ARRAY(X_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(X_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(B_outputFromGpu); POLYBENCH_FREE_ARRAY(X); POLYBENCH_FREE_ARRAY(X_outputFromGpu); return 0; } #include "../../common/polybench.c"
2222a8d4affffa91a0381ecab799a58c25583dbf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define CHECK_ERROR(errorMessage) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", errorMessage, __FILE__, __LINE__, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } #define TILE_N 16 #define TILE_TB_HEIGHT 8 #define TILE_M (TILE_N*TILE_TB_HEIGHT) __global__ void mysgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { float c[TILE_N]; for (int i=0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < k; i+=TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x]=B[n + (i+threadIdx.y)*ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i+j)*lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc*blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t+i*ldc] = C[t+i*ldc] * beta + alpha * c[i]; } } void regtileSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc ) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } if ((m%TILE_M) || (n%TILE_N)) { std::cerr << "unsupported size of matrix. m should be multiple of " << TILE_M << "; n should be multiple of " << TILE_N << std::endl; } dim3 grid( m/TILE_M, n/TILE_N ), threads( TILE_N, TILE_TB_HEIGHT ); hipLaunchKernelGGL(( mysgemmNT), dim3(grid), dim3(threads), 0, 0, A, lda, B, ldb, C, ldc, k, alpha, beta); CHECK_ERROR("mySgemm"); }
2222a8d4affffa91a0381ecab799a58c25583dbf.cu
#define CHECK_ERROR(errorMessage) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } #define TILE_N 16 #define TILE_TB_HEIGHT 8 #define TILE_M (TILE_N*TILE_TB_HEIGHT) __global__ void mysgemmNT( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { float c[TILE_N]; for (int i=0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < k; i+=TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x]=B[n + (i+threadIdx.y)*ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i+j)*lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc*blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t+i*ldc] = C[t+i*ldc] * beta + alpha * c[i]; } } void regtileSgemm( char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc ) { if ((transa != 'N') && (transa != 'n')) { std::cerr << "unsupported value of 'transa' in regtileSgemm()" << std::endl; return; } if ((transb != 'T') && (transb != 't')) { std::cerr << "unsupported value of 'transb' in regtileSgemm()" << std::endl; return; } if ((m%TILE_M) || (n%TILE_N)) { std::cerr << "unsupported size of matrix. m should be multiple of " << TILE_M << "; n should be multiple of " << TILE_N << std::endl; } dim3 grid( m/TILE_M, n/TILE_N ), threads( TILE_N, TILE_TB_HEIGHT ); mysgemmNT<<<grid, threads>>>( A, lda, B, ldb, C, ldc, k, alpha, beta); CHECK_ERROR("mySgemm"); }
a09ff5fd882c1f683fdfdb5c66f90c6d86487bf0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Name: * Student id: * ITSC email: */ #include <cstring> #include <cstdint> #include <cstdlib> #include <vector> #include <iostream> #include "cuda_push_relabel.h" using namespace std; __global__ void pre_flow(int *dist, unsigned long long int *excess, int *cap, int *flow, int N, int src) { int global_tid = blockDim.x * blockIdx.x + threadIdx.x; int element_skip = blockDim.x * gridDim.x; if (global_tid == 0 && blockIdx.x == 0) { dist[src] = N; } for (int v = global_tid; v < N; v += element_skip) { flow[utils::dev_idx(src, v, N)] = cap[utils::dev_idx(src, v, N)]; flow[utils::dev_idx(v, src, N)] = -flow[utils::dev_idx(src, v, N)]; excess[v] = flow[utils::dev_idx(src, v, N)]; } } /* * NOTE: below there are two version of `push` function. Both are OK for us. * The second one is slight faster than the first one * */ __global__ void push(int* active_nodes, int active_nodes_size, int* dist, unsigned long long int *excess, int* stash_send, int *cap, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; extern __shared__ int v_can_push_excess[]; __shared__ int v_can_push_count; if (threadIdx.x == 0){ v_can_push_count = 0; } __syncthreads(); for (int v = threadIdx.x; v < N; v += blockDim.x) { auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; if (residual_cap > 0 && dist[u] > dist[v] && excess[u] != 0) { int this_pos = atomicAdd(&v_can_push_count, 2); v_can_push_excess[this_pos] = residual_cap; v_can_push_excess[this_pos + 1] = v; } } __syncthreads(); if(threadIdx.x == 0){ for (int v = 0; v < v_can_push_count && excess[u] != 0; v +=2){ if(v_can_push_excess[v] > 0){ auto send = v_can_push_excess[v] < excess[u] ? 
v_can_push_excess[v]:excess[u]; auto new_excess = excess[u] - send; excess[u] = new_excess; stash_send[utils::dev_idx(u, v_can_push_excess[v+1], N)] = send; } } } __syncthreads(); } } //__global__ void push(int* active_nodes, int active_nodes_size, int* dist, unsigned long long int *excess, // int* stash_send, int *cap, int *flow, int N) { // for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { // auto u = active_nodes[nodes_it]; // for (int v = threadIdx.x; v < N; v += blockDim.x) { // auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; // if (residual_cap > 0 && dist[u] > dist[v] && excess[u] != 0) { // unsigned long long int send; // unsigned long long int old_excess; // unsigned long long int new_excess; // do { // old_excess = excess[u]; // send = old_excess < residual_cap ? old_excess : residual_cap; // new_excess = old_excess - send; // // auto tmp = atomicCAS(excess + u, old_excess, new_excess); // if (tmp == old_excess) { // stash_send[utils::dev_idx(u, v, N)] = send; // break; // } // } while (excess[u] != 0); // } // } // } //} __global__ void apply_push_stash(int* active_nodes, int active_nodes_size, unsigned long long int *stash_excess, int* stash_send, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; for (int v = threadIdx.x; v < N; v += blockDim.x) { if (stash_send[utils::dev_idx(u, v, N)] > 0) { auto send = stash_send[utils::dev_idx(u, v, N)]; flow[utils::dev_idx(u, v, N)] += send; flow[utils::dev_idx(v, u, N)] -= send; atomicAdd(stash_excess + v, send); stash_send[utils::dev_idx(u, v, N)] = 0; } } } } __global__ void relabel(int* active_nodes, int active_nodes_size, unsigned long long int *excess, int* dist, int* dist_stash, int *cap, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; if (excess[u] != 0) { 
__shared__ int min_dist; if (threadIdx.x == 0) { min_dist = INT32_MAX; } __syncthreads(); for (int v = threadIdx.x; v < N; v += blockDim.x) { auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; if (residual_cap > 0) { atomicMin(&min_dist, dist[v]); } } __syncthreads(); if(threadIdx.x == 0) { dist_stash[u] = min_dist + 1; } } } } __global__ void update_excess(unsigned long long int *excess, unsigned long long int *stash_excess, int N) { int global_tid = blockDim.x * blockIdx.x + threadIdx.x; int element_skip = blockDim.x * gridDim.x; for (int v = global_tid; v < N; v += element_skip) { if (stash_excess[v] != 0) { excess[v] += stash_excess[v]; stash_excess[v] = 0; } } } int push_relabel(int blocks_per_grid, int threads_per_block, int N, int src, int sink, int *cap, int *flow) { dim3 blocks(blocks_per_grid); dim3 threads(threads_per_block); int* d_dist_even; int* d_dist_odd; uint64_t *excess = (uint64_t*) malloc(N * sizeof(uint64_t)); unsigned long long int* d_excess; unsigned long long int* d_stash_excess; int* d_cap; int* d_flow; int* d_stash_send; GPUErrChk(hipMalloc(&d_dist_even, sizeof(int) * N)); GPUErrChk(hipMalloc(&d_dist_odd, sizeof(int) * N)); hipMalloc(&d_excess, sizeof(long long int) * N); hipMalloc(&d_stash_excess, sizeof(long long int) * N); hipMalloc(&d_cap, sizeof(int) * N * N); hipMalloc(&d_flow, sizeof(int) * N * N); hipMalloc(&d_stash_send, sizeof(int) * N * N); hipMemset(d_dist_even, 0, sizeof(int) * N); hipMemset(d_stash_send, 0, sizeof(int) * N * N); hipMemcpy(d_cap, cap, sizeof(int) * N * N, hipMemcpyHostToDevice); hipMemcpy(d_flow, flow, sizeof(int) * N * N, hipMemcpyHostToDevice); //GPUErrChk(hipDeviceSynchronize()); // PreFlow. 
hipLaunchKernelGGL(( pre_flow), dim3(blocks), dim3(threads), 0, 0, d_dist_even, d_excess, d_cap, d_flow, N, src); int* d_active_nodes; hipMalloc(&d_active_nodes, sizeof(int) * N); int* active_nodes = (int*) malloc(N * sizeof(int)); int active_nodes_size = 0; for (auto u = 0; u < N; u++) { if (u != src && u != sink) { active_nodes[active_nodes_size++] = u; } } hipMemcpy(d_active_nodes, active_nodes, sizeof(int) * N, hipMemcpyHostToDevice); //GPUErrChk(hipDeviceSynchronize()); auto round = 0; auto iter = 0; // Four-Stage Pulses. while (active_nodes_size != 0) { // Stage 1: push. // Push Kernel. int*& d_dist = (round == 0) ? d_dist_even : d_dist_odd; int*& d_dist_stash = (round == 1) ? d_dist_even : d_dist_odd; hipLaunchKernelGGL(( push), dim3(blocks), dim3(threads), 2 * N * sizeof(int), 0, d_active_nodes, active_nodes_size, d_dist, d_excess, d_stash_send, d_cap, d_flow, N); //GPUErrChk(hipDeviceSynchronize()); hipLaunchKernelGGL(( apply_push_stash), dim3(blocks), dim3(threads), 0, 0, d_active_nodes, active_nodes_size, d_stash_excess, d_stash_send, d_flow, N); //GPUErrChk(hipDeviceSynchronize()); // Stage 2, 3: relabel (update dist to stash_dist). hipMemcpy(d_dist_stash, d_dist, sizeof(int) * N, hipMemcpyDeviceToDevice); // Relabel Kernel. hipLaunchKernelGGL(( relabel), dim3(blocks), dim3(threads), 0, 0, d_active_nodes, active_nodes_size, d_excess, d_dist, d_dist_stash, d_cap, d_flow, N); //GPUErrChk(hipDeviceSynchronize()); // Stage 4: apply excess-flow changes for destination vertices. hipLaunchKernelGGL(( update_excess), dim3(blocks), dim3(threads), 0, 0, d_excess, d_stash_excess, N); //GPUErrChk(hipDeviceSynchronize()); // Construct active nodes. 
hipMemcpy(excess, d_excess, sizeof(uint64_t) * N, hipMemcpyDeviceToHost); active_nodes_size = 0; for (auto u = 0; u < N; u++) { if (excess[u] != 0 && u != src && u != sink) { active_nodes[active_nodes_size++] = u; } } if (active_nodes_size > 0) { hipMemcpy(d_active_nodes, active_nodes, sizeof(int) * N, hipMemcpyHostToDevice); } round = 1 - round; iter++; } hipMemcpy(flow, d_flow, sizeof(int) * N * N, hipMemcpyDeviceToHost); free(excess); free(active_nodes); hipFree(d_cap); hipFree(d_flow); hipFree(d_active_nodes); hipFree(d_dist_odd); hipFree(d_dist_even); hipFree(d_excess); hipFree(d_stash_excess); hipFree(d_stash_send); return 0; }
a09ff5fd882c1f683fdfdb5c66f90c6d86487bf0.cu
/** * Name: * Student id: * ITSC email: */ #include <cstring> #include <cstdint> #include <cstdlib> #include <vector> #include <iostream> #include "cuda_push_relabel.h" using namespace std; __global__ void pre_flow(int *dist, unsigned long long int *excess, int *cap, int *flow, int N, int src) { int global_tid = blockDim.x * blockIdx.x + threadIdx.x; int element_skip = blockDim.x * gridDim.x; if (global_tid == 0 && blockIdx.x == 0) { dist[src] = N; } for (int v = global_tid; v < N; v += element_skip) { flow[utils::dev_idx(src, v, N)] = cap[utils::dev_idx(src, v, N)]; flow[utils::dev_idx(v, src, N)] = -flow[utils::dev_idx(src, v, N)]; excess[v] = flow[utils::dev_idx(src, v, N)]; } } /* * NOTE: below there are two version of `push` function. Both are OK for us. * The second one is slight faster than the first one * */ __global__ void push(int* active_nodes, int active_nodes_size, int* dist, unsigned long long int *excess, int* stash_send, int *cap, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; extern __shared__ int v_can_push_excess[]; __shared__ int v_can_push_count; if (threadIdx.x == 0){ v_can_push_count = 0; } __syncthreads(); for (int v = threadIdx.x; v < N; v += blockDim.x) { auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; if (residual_cap > 0 && dist[u] > dist[v] && excess[u] != 0) { int this_pos = atomicAdd(&v_can_push_count, 2); v_can_push_excess[this_pos] = residual_cap; v_can_push_excess[this_pos + 1] = v; } } __syncthreads(); if(threadIdx.x == 0){ for (int v = 0; v < v_can_push_count && excess[u] != 0; v +=2){ if(v_can_push_excess[v] > 0){ auto send = v_can_push_excess[v] < excess[u] ? 
v_can_push_excess[v]:excess[u]; auto new_excess = excess[u] - send; excess[u] = new_excess; stash_send[utils::dev_idx(u, v_can_push_excess[v+1], N)] = send; } } } __syncthreads(); } } //__global__ void push(int* active_nodes, int active_nodes_size, int* dist, unsigned long long int *excess, // int* stash_send, int *cap, int *flow, int N) { // for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { // auto u = active_nodes[nodes_it]; // for (int v = threadIdx.x; v < N; v += blockDim.x) { // auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; // if (residual_cap > 0 && dist[u] > dist[v] && excess[u] != 0) { // unsigned long long int send; // unsigned long long int old_excess; // unsigned long long int new_excess; // do { // old_excess = excess[u]; // send = old_excess < residual_cap ? old_excess : residual_cap; // new_excess = old_excess - send; // // auto tmp = atomicCAS(excess + u, old_excess, new_excess); // if (tmp == old_excess) { // stash_send[utils::dev_idx(u, v, N)] = send; // break; // } // } while (excess[u] != 0); // } // } // } //} __global__ void apply_push_stash(int* active_nodes, int active_nodes_size, unsigned long long int *stash_excess, int* stash_send, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; for (int v = threadIdx.x; v < N; v += blockDim.x) { if (stash_send[utils::dev_idx(u, v, N)] > 0) { auto send = stash_send[utils::dev_idx(u, v, N)]; flow[utils::dev_idx(u, v, N)] += send; flow[utils::dev_idx(v, u, N)] -= send; atomicAdd(stash_excess + v, send); stash_send[utils::dev_idx(u, v, N)] = 0; } } } } __global__ void relabel(int* active_nodes, int active_nodes_size, unsigned long long int *excess, int* dist, int* dist_stash, int *cap, int *flow, int N) { for (int nodes_it = blockIdx.x; nodes_it < active_nodes_size; nodes_it += gridDim.x) { auto u = active_nodes[nodes_it]; if (excess[u] != 0) { 
__shared__ int min_dist; if (threadIdx.x == 0) { min_dist = INT32_MAX; } __syncthreads(); for (int v = threadIdx.x; v < N; v += blockDim.x) { auto residual_cap = cap[utils::dev_idx(u, v, N)] - flow[utils::dev_idx(u, v, N)]; if (residual_cap > 0) { atomicMin(&min_dist, dist[v]); } } __syncthreads(); if(threadIdx.x == 0) { dist_stash[u] = min_dist + 1; } } } } __global__ void update_excess(unsigned long long int *excess, unsigned long long int *stash_excess, int N) { int global_tid = blockDim.x * blockIdx.x + threadIdx.x; int element_skip = blockDim.x * gridDim.x; for (int v = global_tid; v < N; v += element_skip) { if (stash_excess[v] != 0) { excess[v] += stash_excess[v]; stash_excess[v] = 0; } } } int push_relabel(int blocks_per_grid, int threads_per_block, int N, int src, int sink, int *cap, int *flow) { dim3 blocks(blocks_per_grid); dim3 threads(threads_per_block); int* d_dist_even; int* d_dist_odd; uint64_t *excess = (uint64_t*) malloc(N * sizeof(uint64_t)); unsigned long long int* d_excess; unsigned long long int* d_stash_excess; int* d_cap; int* d_flow; int* d_stash_send; GPUErrChk(cudaMalloc(&d_dist_even, sizeof(int) * N)); GPUErrChk(cudaMalloc(&d_dist_odd, sizeof(int) * N)); cudaMalloc(&d_excess, sizeof(long long int) * N); cudaMalloc(&d_stash_excess, sizeof(long long int) * N); cudaMalloc(&d_cap, sizeof(int) * N * N); cudaMalloc(&d_flow, sizeof(int) * N * N); cudaMalloc(&d_stash_send, sizeof(int) * N * N); cudaMemset(d_dist_even, 0, sizeof(int) * N); cudaMemset(d_stash_send, 0, sizeof(int) * N * N); cudaMemcpy(d_cap, cap, sizeof(int) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(d_flow, flow, sizeof(int) * N * N, cudaMemcpyHostToDevice); //GPUErrChk(cudaDeviceSynchronize()); // PreFlow. 
pre_flow<<<blocks, threads>>>(d_dist_even, d_excess, d_cap, d_flow, N, src); int* d_active_nodes; cudaMalloc(&d_active_nodes, sizeof(int) * N); int* active_nodes = (int*) malloc(N * sizeof(int)); int active_nodes_size = 0; for (auto u = 0; u < N; u++) { if (u != src && u != sink) { active_nodes[active_nodes_size++] = u; } } cudaMemcpy(d_active_nodes, active_nodes, sizeof(int) * N, cudaMemcpyHostToDevice); //GPUErrChk(cudaDeviceSynchronize()); auto round = 0; auto iter = 0; // Four-Stage Pulses. while (active_nodes_size != 0) { // Stage 1: push. // Push Kernel. int*& d_dist = (round == 0) ? d_dist_even : d_dist_odd; int*& d_dist_stash = (round == 1) ? d_dist_even : d_dist_odd; push<<<blocks, threads, 2 * N * sizeof(int)>>>(d_active_nodes, active_nodes_size, d_dist, d_excess, d_stash_send, d_cap, d_flow, N); //GPUErrChk(cudaDeviceSynchronize()); apply_push_stash<<<blocks, threads>>>(d_active_nodes, active_nodes_size, d_stash_excess, d_stash_send, d_flow, N); //GPUErrChk(cudaDeviceSynchronize()); // Stage 2, 3: relabel (update dist to stash_dist). cudaMemcpy(d_dist_stash, d_dist, sizeof(int) * N, cudaMemcpyDeviceToDevice); // Relabel Kernel. relabel<<<blocks, threads>>>(d_active_nodes, active_nodes_size, d_excess, d_dist, d_dist_stash, d_cap, d_flow, N); //GPUErrChk(cudaDeviceSynchronize()); // Stage 4: apply excess-flow changes for destination vertices. update_excess<<<blocks, threads>>>(d_excess, d_stash_excess, N); //GPUErrChk(cudaDeviceSynchronize()); // Construct active nodes. 
cudaMemcpy(excess, d_excess, sizeof(uint64_t) * N, cudaMemcpyDeviceToHost); active_nodes_size = 0; for (auto u = 0; u < N; u++) { if (excess[u] != 0 && u != src && u != sink) { active_nodes[active_nodes_size++] = u; } } if (active_nodes_size > 0) { cudaMemcpy(d_active_nodes, active_nodes, sizeof(int) * N, cudaMemcpyHostToDevice); } round = 1 - round; iter++; } cudaMemcpy(flow, d_flow, sizeof(int) * N * N, cudaMemcpyDeviceToHost); free(excess); free(active_nodes); cudaFree(d_cap); cudaFree(d_flow); cudaFree(d_active_nodes); cudaFree(d_dist_odd); cudaFree(d_dist_even); cudaFree(d_excess); cudaFree(d_stash_excess); cudaFree(d_stash_send); return 0; }
26828b1cca5d1624fe47060cf21ee511e9e663d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Black-Scholes [1] is a well-known algorithm from computational finance applied in the pricing // of financial instruments called "call" and "put" options. As a numerically-hungry, // computationally-bound kernel, it was an early success story in GPU computing [2]. We'll leave // understanding the partial differential equations governing Black-Scholes to the quants, and // instead explore the effects of divergence on computationally-bound kernels, using Black-Scholes // as our straw man. By the end of this MP, you should understand how to apply stream compaction [3] // as a simple scheduling technique to ensure good utilization amidst computational divergence. // // Imagine you are an quantative analyst at an investment bank studying the effects of // "risk" and "volatility" on the options pricing of a set of stocks. These two parameters act as // knobs to the Black-Scholes algorithm. After one round of the algorithm, you're interested in // a subset of the stocks whose call and put prices exceed some threshold. After selecting the // stocks whose prices meet this filter, you'd like to run a set of subsequent rounds of = // Black-Scholes at different points in the parameter space. In Part 1, we'll implement a // straightforward algorithm achieving this goal, but find out we leave a lot of throughput on the // table. // // Your job in Part 1 is simple: search this file and black_scholes.cu for lines marked // TODO: and fill in the code. The host implementations should make it clear what to do. // // [1] http://en.wikipedia.org/wiki/Black-scholes // [2] http://http.developer.nvidia.com/GPUGems2/gpugems2_chapter45.html // [3] http://graphics.cs.uiuc.edu/~jch/papers/shadersorting.pdf #include <stdlib.h> #include <stdio.h> #include <vector> #include <iostream> #include <limits> #include <numeric> #include "black_scholes.h" #include "mp3-util.h" //Allocates space on CUDA device. 
Returns success. bool allocate_device_storage(real *&d_first_round_call_result, real *&d_first_round_put_result, real *&d_subsequent_round_call_result, real *&d_subsequent_round_put_result, real *&d_stock_price, real *&d_option_strike, real *&d_option_years, const size_t n) { hipMalloc((void **) &d_first_round_call_result, n*sizeof(float3)); hipMalloc((void **) &d_first_round_put_result, n*sizeof(float3)); hipMalloc((void **) &d_subsequent_round_call_result, n*sizeof(float3)); hipMalloc((void **) &d_subsequent_round_put_result, n*sizeof(float3)); hipMalloc((void **) &d_stock_price, n*sizeof(float3)); hipMalloc((void **) &d_option_strike, n*sizeof(float3)); hipMalloc((void **) &d_option_years, n*sizeof(float3)); if(d_first_round_call_result == 0 || d_first_round_put_result == 0 || d_subsequent_round_call_result == 0 || d_subsequent_round_put_result == 0 || d_stock_price == 0 || d_option_strike == 0 || d_option_years == 0) return false; else return true; } //Deallocates the space allocated by above method. 
void deallocate_device_storage(real *d_first_round_call_result, real *d_first_round_put_result, real *d_subsequent_round_call_result, real *d_subsequent_round_put_result, real *d_stock_price, real *d_option_strike, real *d_option_years) { hipFree(d_first_round_call_result); hipFree(d_first_round_put_result); hipFree(d_subsequent_round_call_result); hipFree(d_subsequent_round_put_result); hipFree(d_stock_price); hipFree(d_option_strike); hipFree(d_option_years); } int main(void) { event_pair timer; const size_t num_subsequent_rounds = 5; std::vector<float> gpu_time(1 + num_subsequent_rounds); std::vector<float> cpu_time(1 + num_subsequent_rounds); // create arrays for 4M options size_t num_options = 1<<22; // allocate host storage std::vector<real> h_first_round_call_result(num_options,0); std::vector<real> h_first_round_put_result(num_options, 0); std::vector<real> h_subsequent_round_call_result(num_options,0); std::vector<real> h_subsequent_round_put_result(num_options, 0); std::vector<real> h_stock_price(num_options); std::vector<real> h_option_strike(num_options); std::vector<real> h_option_years(num_options); // generate options set srand(5347); for(int i = 0; i < num_options; ++i) { h_stock_price[i] = random_real(5.0, 30.0); h_option_strike[i] = random_real(1.0, 100.0); h_option_years[i] = random_real(0.25, 10.0); } // some pointers to the data set which will live in device memory real *d_first_round_call_result = 0; real *d_first_round_put_result = 0; real *d_subsequent_round_call_result = 0; real *d_subsequent_round_put_result = 0; real *d_stock_price = 0; real *d_option_strike = 0; real *d_option_years = 0; // allocate device storage if(!allocate_device_storage(d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, d_stock_price, d_option_strike, d_option_years, num_options)) { std::cerr << "Error allocating device memory!" 
<< std::endl; exit(-1); } // fill the result arrays with 0 hipMemset(d_first_round_call_result, 0, sizeof(real) * num_options); hipMemset(d_first_round_put_result, 0, sizeof(real) * num_options); hipMemset(d_subsequent_round_call_result, 0, sizeof(real) * num_options); hipMemset(d_subsequent_round_put_result, 0, sizeof(real) * num_options); // copy input to GPU start_timer(&timer); hipMemcpy(d_stock_price, &h_stock_price[0], sizeof(real) * num_options, hipMemcpyHostToDevice); hipMemcpy(d_option_strike, &h_option_strike[0], sizeof(real) * num_options, hipMemcpyHostToDevice); hipMemcpy(d_option_years, &h_option_years[0], sizeof(real) * num_options, hipMemcpyHostToDevice); stop_timer(&timer, "host to device copy of input"); // BEGIN ROUND 0 // we will use the two following parameters // to first round of the Black-Scholes algorithm const real first_round_riskless_rate = 0.02; const real first_round_volatility = 0.30; //Calculates kernel launch parameters. int blockSize=512; int nBlocks = num_options/blockSize + (num_options%blockSize == 0?0:1); // do the first round of Black-Scholes using our parameters start_timer(&timer); hipLaunchKernelGGL(( black_scholes_kernel), dim3(nBlocks), dim3(blockSize) , 0, 0, d_stock_price, d_option_strike, d_option_years, d_first_round_call_result, d_first_round_put_result, first_round_riskless_rate, first_round_volatility, num_options); gpu_time[0] = stop_timer(&timer, "GPU Black-Scholes round 0"); check_cuda_error("GPU Black-Scholes round 0", __FILE__, __LINE__); // do round 0 of Black-Scholes on the host start_timer(&timer); black_scholes_host(&h_stock_price[0], &h_option_strike[0], &h_option_years[0], &h_first_round_call_result[0], &h_first_round_put_result[0], first_round_riskless_rate, first_round_volatility, num_options); cpu_time[0] = stop_timer(&timer, "CPU Black-Scholes round 0"); // validate gpu results from round 0 std::vector<real> h_validate_me(num_options); hipMemcpy(&h_validate_me[0], d_first_round_call_result, 
sizeof(real) * num_options, hipMemcpyDeviceToHost); // pass true as a final optional argument to fuzzy_validate for verbose output if(!fuzzy_validate(&h_validate_me[0], &h_first_round_call_result[0], num_options)) { std::cerr << "Error: round 0 of call results don't match!" << std::endl; exit(-1); } hipMemcpy(&h_validate_me[0], d_first_round_put_result, sizeof(real) * num_options, hipMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_first_round_put_result[0], num_options)) { std::cerr << "Error: round 0 of put results don't match!" << std::endl; exit(-1); } // BEGIN SUBSEQUENT ROUNDS // in subsequent rounds, select the stocks whose call & put prices from the first round // meet or exceed these thresholds const real min_call_threshold = 2.0; const real min_put_threshold = 4.0; size_t num_filtered_options = 0; for(int round = 1; round < num_subsequent_rounds + 1; ++round) { // change the parameters of the model in each subsequent round const real riskless_rate = random_real(0.03, 0.04); const real volatility = random_real(0.50, 0.60); // do round of Black-Scholes using new parameters on the device // filter the set of options to compute given the resuts of the last round start_timer(&timer); hipLaunchKernelGGL(( naively_filtered_black_scholes_kernel), dim3(nBlocks), dim3(blockSize), 0, 0, d_stock_price, d_option_strike, d_option_years, d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, min_call_threshold, min_put_threshold, riskless_rate, volatility, num_options); char message[256]; sprintf(message, "GPU Black-Scholes round %d", round); gpu_time[round] = stop_timer(&timer, message); check_cuda_error(message, __FILE__, __LINE__); // do a round of Black-Scholes on the host using new parameters // filter the set of options to compute given the results of the last round start_timer(&timer); num_filtered_options = filtered_black_scholes_host(&h_stock_price[0], &h_option_strike[0], 
&h_option_years[0], &h_first_round_call_result[0], &h_first_round_put_result[0], &h_subsequent_round_call_result[0], &h_subsequent_round_put_result[0], min_call_threshold, min_put_threshold, riskless_rate, volatility, num_options); sprintf(message, "CPU Black-Scholes round %d", round); cpu_time[round] = stop_timer(&timer, message); // validate gpu results from this round hipMemcpy(&h_validate_me[0], d_subsequent_round_call_result, sizeof(real) * num_options, hipMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_call_result[0], num_options)) { std::cerr << "Error: round " << round << " of call results don't match!" << std::endl; exit(-1); } hipMemcpy(&h_validate_me[0], d_subsequent_round_put_result, sizeof(real) * num_options, hipMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_put_result[0], num_options)) { std::cerr << "Error: round " << round << " of put results don't match!" << std::endl; exit(-1); } } // end for subsequent round deallocate_device_storage(d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, d_stock_price, d_option_strike, d_option_years); // output a report std::cout << std::endl; real first_round_gpu_throughput = static_cast<real>(num_options) / (gpu_time[0] / 1000.0f); real first_round_cpu_throughput = static_cast<real>(num_options) / (cpu_time[0] / 1000.0f); std::cout << "Round 0: " << num_options << " options" << std::endl; std::cout << "Throughput of GPU Black-Scholes Round 0: " << (first_round_gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Throughput of CPU Black-Scholes Round 0: " << (first_round_cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Speedup of Round 0: " << first_round_gpu_throughput / first_round_cpu_throughput << "x" << std::endl << std::endl; for(int i = 1; i < gpu_time.size(); ++i) { real gpu_throughput = static_cast<real>(num_filtered_options) / 
(gpu_time[i] / 1000.0f); real cpu_throughput = static_cast<real>(num_filtered_options) / (cpu_time[i] / 1000.0f); std::cout << "Round " << i << ": " << num_filtered_options << " options" << std::endl; std::cout << "Throughput of GPU Black-Scholes Round " << i << ": " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Throughput of CPU Black-Scholes Round " << i << ": " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Speedup of Round " << i << ": " << gpu_throughput / cpu_throughput << "x" << std::endl << std::endl; } // report overall performance real total_gpu_time = std::accumulate(gpu_time.begin(), gpu_time.end(), 0.0); real total_cpu_time = std::accumulate(cpu_time.begin(), cpu_time.end(), 0.0); real gpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_filtered_options) / ((total_gpu_time) / 1000.0f); real cpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_filtered_options) / ((total_cpu_time) / 1000.0f); std::cout << "Overall GPU throughput: " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Overall CPU throughput: " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl << std::endl; std::cout << "Overall speedup: " << gpu_throughput / cpu_throughput << "x" << std::endl; return 0; }
26828b1cca5d1624fe47060cf21ee511e9e663d3.cu
// Black-Scholes [1] is a well-known algorithm from computational finance applied in the pricing // of financial instruments called "call" and "put" options. As a numerically-hungry, // computationally-bound kernel, it was an early success story in GPU computing [2]. We'll leave // understanding the partial differential equations governing Black-Scholes to the quants, and // instead explore the effects of divergence on computationally-bound kernels, using Black-Scholes // as our straw man. By the end of this MP, you should understand how to apply stream compaction [3] // as a simple scheduling technique to ensure good utilization amidst computational divergence. // // Imagine you are an quantative analyst at an investment bank studying the effects of // "risk" and "volatility" on the options pricing of a set of stocks. These two parameters act as // knobs to the Black-Scholes algorithm. After one round of the algorithm, you're interested in // a subset of the stocks whose call and put prices exceed some threshold. After selecting the // stocks whose prices meet this filter, you'd like to run a set of subsequent rounds of = // Black-Scholes at different points in the parameter space. In Part 1, we'll implement a // straightforward algorithm achieving this goal, but find out we leave a lot of throughput on the // table. // // Your job in Part 1 is simple: search this file and black_scholes.cu for lines marked // TODO: and fill in the code. The host implementations should make it clear what to do. // // [1] http://en.wikipedia.org/wiki/Black-scholes // [2] http://http.developer.nvidia.com/GPUGems2/gpugems2_chapter45.html // [3] http://graphics.cs.uiuc.edu/~jch/papers/shadersorting.pdf #include <stdlib.h> #include <stdio.h> #include <vector> #include <iostream> #include <limits> #include <numeric> #include "black_scholes.h" #include "mp3-util.h" //Allocates space on CUDA device. Returns success. 
bool allocate_device_storage(real *&d_first_round_call_result, real *&d_first_round_put_result, real *&d_subsequent_round_call_result, real *&d_subsequent_round_put_result, real *&d_stock_price, real *&d_option_strike, real *&d_option_years, const size_t n) { cudaMalloc((void **) &d_first_round_call_result, n*sizeof(float3)); cudaMalloc((void **) &d_first_round_put_result, n*sizeof(float3)); cudaMalloc((void **) &d_subsequent_round_call_result, n*sizeof(float3)); cudaMalloc((void **) &d_subsequent_round_put_result, n*sizeof(float3)); cudaMalloc((void **) &d_stock_price, n*sizeof(float3)); cudaMalloc((void **) &d_option_strike, n*sizeof(float3)); cudaMalloc((void **) &d_option_years, n*sizeof(float3)); if(d_first_round_call_result == 0 || d_first_round_put_result == 0 || d_subsequent_round_call_result == 0 || d_subsequent_round_put_result == 0 || d_stock_price == 0 || d_option_strike == 0 || d_option_years == 0) return false; else return true; } //Deallocates the space allocated by above method. 
void deallocate_device_storage(real *d_first_round_call_result, real *d_first_round_put_result, real *d_subsequent_round_call_result, real *d_subsequent_round_put_result, real *d_stock_price, real *d_option_strike, real *d_option_years) { cudaFree(d_first_round_call_result); cudaFree(d_first_round_put_result); cudaFree(d_subsequent_round_call_result); cudaFree(d_subsequent_round_put_result); cudaFree(d_stock_price); cudaFree(d_option_strike); cudaFree(d_option_years); } int main(void) { event_pair timer; const size_t num_subsequent_rounds = 5; std::vector<float> gpu_time(1 + num_subsequent_rounds); std::vector<float> cpu_time(1 + num_subsequent_rounds); // create arrays for 4M options size_t num_options = 1<<22; // allocate host storage std::vector<real> h_first_round_call_result(num_options,0); std::vector<real> h_first_round_put_result(num_options, 0); std::vector<real> h_subsequent_round_call_result(num_options,0); std::vector<real> h_subsequent_round_put_result(num_options, 0); std::vector<real> h_stock_price(num_options); std::vector<real> h_option_strike(num_options); std::vector<real> h_option_years(num_options); // generate options set srand(5347); for(int i = 0; i < num_options; ++i) { h_stock_price[i] = random_real(5.0, 30.0); h_option_strike[i] = random_real(1.0, 100.0); h_option_years[i] = random_real(0.25, 10.0); } // some pointers to the data set which will live in device memory real *d_first_round_call_result = 0; real *d_first_round_put_result = 0; real *d_subsequent_round_call_result = 0; real *d_subsequent_round_put_result = 0; real *d_stock_price = 0; real *d_option_strike = 0; real *d_option_years = 0; // allocate device storage if(!allocate_device_storage(d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, d_stock_price, d_option_strike, d_option_years, num_options)) { std::cerr << "Error allocating device memory!" 
<< std::endl; exit(-1); } // fill the result arrays with 0 cudaMemset(d_first_round_call_result, 0, sizeof(real) * num_options); cudaMemset(d_first_round_put_result, 0, sizeof(real) * num_options); cudaMemset(d_subsequent_round_call_result, 0, sizeof(real) * num_options); cudaMemset(d_subsequent_round_put_result, 0, sizeof(real) * num_options); // copy input to GPU start_timer(&timer); cudaMemcpy(d_stock_price, &h_stock_price[0], sizeof(real) * num_options, cudaMemcpyHostToDevice); cudaMemcpy(d_option_strike, &h_option_strike[0], sizeof(real) * num_options, cudaMemcpyHostToDevice); cudaMemcpy(d_option_years, &h_option_years[0], sizeof(real) * num_options, cudaMemcpyHostToDevice); stop_timer(&timer, "host to device copy of input"); // BEGIN ROUND 0 // we will use the two following parameters // to first round of the Black-Scholes algorithm const real first_round_riskless_rate = 0.02; const real first_round_volatility = 0.30; //Calculates kernel launch parameters. int blockSize=512; int nBlocks = num_options/blockSize + (num_options%blockSize == 0?0:1); // do the first round of Black-Scholes using our parameters start_timer(&timer); black_scholes_kernel<<< nBlocks, blockSize >>>(d_stock_price, d_option_strike, d_option_years, d_first_round_call_result, d_first_round_put_result, first_round_riskless_rate, first_round_volatility, num_options); gpu_time[0] = stop_timer(&timer, "GPU Black-Scholes round 0"); check_cuda_error("GPU Black-Scholes round 0", __FILE__, __LINE__); // do round 0 of Black-Scholes on the host start_timer(&timer); black_scholes_host(&h_stock_price[0], &h_option_strike[0], &h_option_years[0], &h_first_round_call_result[0], &h_first_round_put_result[0], first_round_riskless_rate, first_round_volatility, num_options); cpu_time[0] = stop_timer(&timer, "CPU Black-Scholes round 0"); // validate gpu results from round 0 std::vector<real> h_validate_me(num_options); cudaMemcpy(&h_validate_me[0], d_first_round_call_result, sizeof(real) * num_options, 
cudaMemcpyDeviceToHost); // pass true as a final optional argument to fuzzy_validate for verbose output if(!fuzzy_validate(&h_validate_me[0], &h_first_round_call_result[0], num_options)) { std::cerr << "Error: round 0 of call results don't match!" << std::endl; exit(-1); } cudaMemcpy(&h_validate_me[0], d_first_round_put_result, sizeof(real) * num_options, cudaMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_first_round_put_result[0], num_options)) { std::cerr << "Error: round 0 of put results don't match!" << std::endl; exit(-1); } // BEGIN SUBSEQUENT ROUNDS // in subsequent rounds, select the stocks whose call & put prices from the first round // meet or exceed these thresholds const real min_call_threshold = 2.0; const real min_put_threshold = 4.0; size_t num_filtered_options = 0; for(int round = 1; round < num_subsequent_rounds + 1; ++round) { // change the parameters of the model in each subsequent round const real riskless_rate = random_real(0.03, 0.04); const real volatility = random_real(0.50, 0.60); // do round of Black-Scholes using new parameters on the device // filter the set of options to compute given the resuts of the last round start_timer(&timer); naively_filtered_black_scholes_kernel<<< nBlocks, blockSize>>>(d_stock_price, d_option_strike, d_option_years, d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, min_call_threshold, min_put_threshold, riskless_rate, volatility, num_options); char message[256]; sprintf(message, "GPU Black-Scholes round %d", round); gpu_time[round] = stop_timer(&timer, message); check_cuda_error(message, __FILE__, __LINE__); // do a round of Black-Scholes on the host using new parameters // filter the set of options to compute given the results of the last round start_timer(&timer); num_filtered_options = filtered_black_scholes_host(&h_stock_price[0], &h_option_strike[0], &h_option_years[0], &h_first_round_call_result[0], 
&h_first_round_put_result[0], &h_subsequent_round_call_result[0], &h_subsequent_round_put_result[0], min_call_threshold, min_put_threshold, riskless_rate, volatility, num_options); sprintf(message, "CPU Black-Scholes round %d", round); cpu_time[round] = stop_timer(&timer, message); // validate gpu results from this round cudaMemcpy(&h_validate_me[0], d_subsequent_round_call_result, sizeof(real) * num_options, cudaMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_call_result[0], num_options)) { std::cerr << "Error: round " << round << " of call results don't match!" << std::endl; exit(-1); } cudaMemcpy(&h_validate_me[0], d_subsequent_round_put_result, sizeof(real) * num_options, cudaMemcpyDeviceToHost); if(!fuzzy_validate(&h_validate_me[0], &h_subsequent_round_put_result[0], num_options)) { std::cerr << "Error: round " << round << " of put results don't match!" << std::endl; exit(-1); } } // end for subsequent round deallocate_device_storage(d_first_round_call_result, d_first_round_put_result, d_subsequent_round_call_result, d_subsequent_round_put_result, d_stock_price, d_option_strike, d_option_years); // output a report std::cout << std::endl; real first_round_gpu_throughput = static_cast<real>(num_options) / (gpu_time[0] / 1000.0f); real first_round_cpu_throughput = static_cast<real>(num_options) / (cpu_time[0] / 1000.0f); std::cout << "Round 0: " << num_options << " options" << std::endl; std::cout << "Throughput of GPU Black-Scholes Round 0: " << (first_round_gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Throughput of CPU Black-Scholes Round 0: " << (first_round_cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Speedup of Round 0: " << first_round_gpu_throughput / first_round_cpu_throughput << "x" << std::endl << std::endl; for(int i = 1; i < gpu_time.size(); ++i) { real gpu_throughput = static_cast<real>(num_filtered_options) / (gpu_time[i] / 1000.0f); real cpu_throughput = 
static_cast<real>(num_filtered_options) / (cpu_time[i] / 1000.0f); std::cout << "Round " << i << ": " << num_filtered_options << " options" << std::endl; std::cout << "Throughput of GPU Black-Scholes Round " << i << ": " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Throughput of CPU Black-Scholes Round " << i << ": " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Speedup of Round " << i << ": " << gpu_throughput / cpu_throughput << "x" << std::endl << std::endl; } // report overall performance real total_gpu_time = std::accumulate(gpu_time.begin(), gpu_time.end(), 0.0); real total_cpu_time = std::accumulate(cpu_time.begin(), cpu_time.end(), 0.0); real gpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_filtered_options) / ((total_gpu_time) / 1000.0f); real cpu_throughput = static_cast<real>(num_options + num_subsequent_rounds*num_filtered_options) / ((total_cpu_time) / 1000.0f); std::cout << "Overall GPU throughput: " << (gpu_throughput / 1e6) << " Megaoptions/sec" << std::endl; std::cout << "Overall CPU throughput: " << (cpu_throughput / 1e6) << " Megaoptions/sec" << std::endl << std::endl; std::cout << "Overall speedup: " << gpu_throughput / cpu_throughput << "x" << std::endl; return 0; }
0369e636537d8aff9912aee91c902bcdcbd7a960.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cuda_utils.h"
#include "timer.c"

typedef float dtype;

/* Each block of PATCH_DIM x BLOCK_DIM_Y threads transposes one
 * PATCH_DIM x PATCH_DIM tile; every thread covers
 * PATCH_DIM / BLOCK_DIM_Y rows of its tile. */
#define BLOCK_DIM_Y 4
#define PATCH_DIM 32

/* Tiled transpose AT = A^T for an N x N matrix.
 * The +1 column pad on the shared tile avoids shared-memory bank
 * conflicts when the tile is read back with swapped indices. */
__global__ void
matTrans(dtype* AT, dtype* A, int N)  {
	__shared__ dtype scratch[PATCH_DIM][PATCH_DIM + 1];

	int x = blockIdx.x * PATCH_DIM + threadIdx.x;
	int y = blockIdx.y * PATCH_DIM + threadIdx.y;
	int i;

	/* Stage a tile of A into shared memory (coalesced row reads). */
	if (x < N) {
		for (i = 0; i < PATCH_DIM && y < N; i += BLOCK_DIM_Y, y += BLOCK_DIM_Y) {
			scratch[threadIdx.y + i][threadIdx.x] = A[y * N + x];
		}
	}

	/* Swap the block coordinates so writes target the transposed tile. */
	x = blockIdx.y * PATCH_DIM + threadIdx.x;
	y = blockIdx.x * PATCH_DIM + threadIdx.y;

	__syncthreads();

	/* Write the tile out transposed (coalesced row writes into AT). */
	if (x < N) {
		for (i = 0; i < PATCH_DIM && y < N; i += BLOCK_DIM_Y, y += BLOCK_DIM_Y) {
			AT[y * N + x] = scratch[threadIdx.x][threadIdx.y + i];
		}
	}
}

/* Parse the single required argument: the matrix dimension N (> 0). */
void
parseArg (int argc, char** argv, int* N)
{
	if(argc == 2) {
		*N = atoi (argv[1]);
		assert (*N > 0);
	} else {
		fprintf (stderr, "usage: %s <N>\n", argv[0]);
		exit (EXIT_FAILURE);
	}
}

/* Fill `in` (N elements) with uniform random values in [0, 1]. */
void
initArr (dtype* in, int N)
{
	int i;
	for(i = 0; i < N; i++) {
		in[i] = (dtype) rand () / RAND_MAX;
	}
}

/* Reference sequential transpose: AT = A^T for an N x N matrix. */
void
cpuTranspose (dtype* A, dtype* AT, int N)
{
	int i, j;
	for(i = 0; i < N; i++) {
		for(j = 0; j < N; j++) {
			AT[j * N + i] = A[i * N + j];
		}
	}
}

/* Count element-wise mismatches (|a[i] - b[i]| > 1e-6) over N elements.
 * BUGFIX: use fabs; plain abs() truncates the float difference to int,
 * which would hide any mismatch smaller than 1. */
int
cmpArr (dtype* a, dtype* b, int N)
{
	int cnt, i;
	cnt = 0;
	for(i = 0; i < N; i++) {
		if(fabs(a[i] - b[i]) > 1e-6) cnt++;
	}
	return cnt;
}

/* Copy A to the device, run (and time) the transpose kernel, and copy
 * the result back into AT. The first launch is an untimed warm-up. */
void
gpuTranspose (dtype* A, dtype* AT, int N)
{
	dtype *d_idata, *d_odata;
	CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * N * sizeof (dtype)));
	CUDA_CHECK_ERROR (hipMalloc (&d_odata, N * N * sizeof (dtype)));
	CUDA_CHECK_ERROR (hipMemcpy (d_idata, A, N * N * sizeof (dtype),
		hipMemcpyHostToDevice));

	/* BUGFIX: the x dimension was (N + PATCH_DIM - 1 / PATCH_DIM), i.e.
	 * N + 32 blocks (1/32 == 0) — correct results, but a massively
	 * over-sized grid. Both dimensions are now ceil(N / PATCH_DIM). */
	dim3 gb((N + PATCH_DIM - 1) / PATCH_DIM, (N + PATCH_DIM - 1) / PATCH_DIM, 1);
	dim3 tb(PATCH_DIM, BLOCK_DIM_Y, 1);

	/* warm-up launch (not timed) */
	hipLaunchKernelGGL(( matTrans) , dim3(gb), dim3(tb), 0, 0, d_odata, d_idata, N);

	struct stopwatch_t* timer = NULL;
	long double t_gpu;

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);
	/* timed kernel run */
	hipLaunchKernelGGL(( matTrans) , dim3(gb), dim3(tb), 0, 0, d_odata, d_idata, N);
	hipDeviceSynchronize ();
	t_gpu = stopwatch_stop (timer);

	/* cast before multiplying so N * N cannot overflow int for large N */
	fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n",
		t_gpu, ((long double) N * N) / t_gpu * 1e-9 );

	CUDA_CHECK_ERROR (hipMemcpy (AT, d_odata, N * N * sizeof (dtype),
		hipMemcpyDeviceToHost));

	/* BUGFIX: release device buffers (previously leaked). */
	CUDA_CHECK_ERROR (hipFree (d_idata));
	CUDA_CHECK_ERROR (hipFree (d_odata));
}

int
main(int argc, char** argv)
{
	/* variables */
	dtype *A, *ATgpu, *ATcpu;
	int err;

	int N;

	struct stopwatch_t* timer = NULL;
	long double t_cpu;

	N = -1;
	parseArg (argc, argv, &N);

	/* input and output matrices on host */
	/* output */
	ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
	ATgpu = (dtype*) malloc (N * N * sizeof (dtype));

	/* input */
	A = (dtype*) malloc (N * N * sizeof (dtype));

	initArr (A, N * N);

	/* GPU transpose kernel */
	gpuTranspose (A, ATgpu, N);

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);
	/* compute reference array */
	cpuTranspose (A, ATcpu, N);
	t_cpu = stopwatch_stop (timer);
	fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n",
		t_cpu);

	/* check correctness */
	err = cmpArr (ATgpu, ATcpu, N * N);
	if(err) {
		fprintf (stderr, "Transpose failed: %d\n", err);
	} else {
		fprintf (stderr, "Transpose successful\n");
	}

	free (A);
	free (ATgpu);
	free (ATcpu);

	return 0;
}
0369e636537d8aff9912aee91c902bcdcbd7a960.cu
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cuda_utils.h"
#include "timer.c"

typedef float dtype;

/* Each block of PATCH_DIM x BLOCK_DIM_Y threads transposes one
 * PATCH_DIM x PATCH_DIM tile; every thread covers
 * PATCH_DIM / BLOCK_DIM_Y rows of its tile. */
#define BLOCK_DIM_Y 4
#define PATCH_DIM 32

/* Tiled transpose AT = A^T for an N x N matrix.
 * The +1 column pad on the shared tile avoids shared-memory bank
 * conflicts when the tile is read back with swapped indices. */
__global__ void
matTrans(dtype* AT, dtype* A, int N)  {
	__shared__ dtype scratch[PATCH_DIM][PATCH_DIM + 1];

	int x = blockIdx.x * PATCH_DIM + threadIdx.x;
	int y = blockIdx.y * PATCH_DIM + threadIdx.y;
	int i;

	/* Stage a tile of A into shared memory (coalesced row reads). */
	if (x < N) {
		for (i = 0; i < PATCH_DIM && y < N; i += BLOCK_DIM_Y, y += BLOCK_DIM_Y) {
			scratch[threadIdx.y + i][threadIdx.x] = A[y * N + x];
		}
	}

	/* Swap the block coordinates so writes target the transposed tile. */
	x = blockIdx.y * PATCH_DIM + threadIdx.x;
	y = blockIdx.x * PATCH_DIM + threadIdx.y;

	__syncthreads();

	/* Write the tile out transposed (coalesced row writes into AT). */
	if (x < N) {
		for (i = 0; i < PATCH_DIM && y < N; i += BLOCK_DIM_Y, y += BLOCK_DIM_Y) {
			AT[y * N + x] = scratch[threadIdx.x][threadIdx.y + i];
		}
	}
}

/* Parse the single required argument: the matrix dimension N (> 0). */
void
parseArg (int argc, char** argv, int* N)
{
	if(argc == 2) {
		*N = atoi (argv[1]);
		assert (*N > 0);
	} else {
		fprintf (stderr, "usage: %s <N>\n", argv[0]);
		exit (EXIT_FAILURE);
	}
}

/* Fill `in` (N elements) with uniform random values in [0, 1]. */
void
initArr (dtype* in, int N)
{
	int i;
	for(i = 0; i < N; i++) {
		in[i] = (dtype) rand () / RAND_MAX;
	}
}

/* Reference sequential transpose: AT = A^T for an N x N matrix. */
void
cpuTranspose (dtype* A, dtype* AT, int N)
{
	int i, j;
	for(i = 0; i < N; i++) {
		for(j = 0; j < N; j++) {
			AT[j * N + i] = A[i * N + j];
		}
	}
}

/* Count element-wise mismatches (|a[i] - b[i]| > 1e-6) over N elements.
 * BUGFIX: use fabs; plain abs() truncates the float difference to int,
 * which would hide any mismatch smaller than 1. */
int
cmpArr (dtype* a, dtype* b, int N)
{
	int cnt, i;
	cnt = 0;
	for(i = 0; i < N; i++) {
		if(fabs(a[i] - b[i]) > 1e-6) cnt++;
	}
	return cnt;
}

/* Copy A to the device, run (and time) the transpose kernel, and copy
 * the result back into AT. The first launch is an untimed warm-up. */
void
gpuTranspose (dtype* A, dtype* AT, int N)
{
	dtype *d_idata, *d_odata;
	CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * N * sizeof (dtype)));
	CUDA_CHECK_ERROR (cudaMalloc (&d_odata, N * N * sizeof (dtype)));
	CUDA_CHECK_ERROR (cudaMemcpy (d_idata, A, N * N * sizeof (dtype),
		cudaMemcpyHostToDevice));

	/* BUGFIX: the x dimension was (N + PATCH_DIM - 1 / PATCH_DIM), i.e.
	 * N + 32 blocks (1/32 == 0) — correct results, but a massively
	 * over-sized grid. Both dimensions are now ceil(N / PATCH_DIM). */
	dim3 gb((N + PATCH_DIM - 1) / PATCH_DIM, (N + PATCH_DIM - 1) / PATCH_DIM, 1);
	dim3 tb(PATCH_DIM, BLOCK_DIM_Y, 1);

	/* warm-up launch (not timed) */
	matTrans <<<gb, tb>>> (d_odata, d_idata, N);

	struct stopwatch_t* timer = NULL;
	long double t_gpu;

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);
	/* timed kernel run */
	matTrans <<<gb, tb>>> (d_odata, d_idata, N);
	/* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
	 * is the supported equivalent. */
	cudaDeviceSynchronize ();
	t_gpu = stopwatch_stop (timer);

	/* cast before multiplying so N * N cannot overflow int for large N */
	fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n",
		t_gpu, ((long double) N * N) / t_gpu * 1e-9 );

	CUDA_CHECK_ERROR (cudaMemcpy (AT, d_odata, N * N * sizeof (dtype),
		cudaMemcpyDeviceToHost));

	/* BUGFIX: release device buffers (previously leaked). */
	CUDA_CHECK_ERROR (cudaFree (d_idata));
	CUDA_CHECK_ERROR (cudaFree (d_odata));
}

int
main(int argc, char** argv)
{
	/* variables */
	dtype *A, *ATgpu, *ATcpu;
	int err;

	int N;

	struct stopwatch_t* timer = NULL;
	long double t_cpu;

	N = -1;
	parseArg (argc, argv, &N);

	/* input and output matrices on host */
	/* output */
	ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
	ATgpu = (dtype*) malloc (N * N * sizeof (dtype));

	/* input */
	A = (dtype*) malloc (N * N * sizeof (dtype));

	initArr (A, N * N);

	/* GPU transpose kernel */
	gpuTranspose (A, ATgpu, N);

	/* Setup timers */
	stopwatch_init ();
	timer = stopwatch_create ();

	stopwatch_start (timer);
	/* compute reference array */
	cpuTranspose (A, ATcpu, N);
	t_cpu = stopwatch_stop (timer);
	fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n",
		t_cpu);

	/* check correctness */
	err = cmpArr (ATgpu, ATcpu, N * N);
	if(err) {
		fprintf (stderr, "Transpose failed: %d\n", err);
	} else {
		fprintf (stderr, "Transpose successful\n");
	}

	free (A);
	free (ATgpu);
	free (ATcpu);

	return 0;
}
7361c4a7a5f25c61f6a397ec0710df1ba075e302.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tfcc_cudasegmentinterface.h"

#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"

namespace tfcc {

// Unsorted segment sum over the leading axis: for each of `batch` rows of
// `k` elements in `a`, add the row element-wise into output row b[ids[row]].
// Rows whose id is negative are skipped. Uses a grid-stride loop over all
// batch*k elements; atomicAdd is required because several input rows may
// map to the same output segment.
// NOTE(review): the `sum` parameter (number of output segments) is unused
// inside the kernel — out-of-range ids are not bounds-checked here.
template <class T>
static __global__ void _cuda_unsorted_segment_sum(
    const T* a, unsigned batch, unsigned k, const int* ids, unsigned sum, T* b) {
  const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
  const unsigned skip = blockDim.x * gridDim.x;
  const unsigned total = batch * k;
  for (unsigned i = tid; i < total; i += skip) {
    unsigned ps1 = i / k;  // source row index
    unsigned ps2 = i % k;  // column within the row
    int idx = ids[ps1];
    if (idx < 0) {
      continue;  // negative ids mean "drop this row"
    }
    atomicAdd(b + idx * k + ps2, a[i]);
  }
}

// helper functions

// Overload enabled only for element types this kernel supports via
// atomicAdd (float / int32 / uint32): zero-fill the output asynchronously
// on the session's stream, then launch the kernel on the same stream.
template <class T>
static inline typename std::enable_if<
    std::is_same<float, T>::value || std::is_same<int32_t, T>::value ||
        std::is_same<uint32_t, T>::value,
    Variable<T>>::type
_unsorted_segment_sum_helper(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num,
    size_t blockCount, size_t threadCount) {
  std::vector<unsigned> s = a.shape().toVector();
  s[0] = num;  // output keeps a's shape but has `num` segments on axis 0
  Variable<T> result(std::move(s));
  tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
  hipError_t ret = hipMemsetAsync(result.data(), 0, result.size() * sizeof(T), session->getImpl()->cudaStream());
  if (ret != hipSuccess) throw CUDARuntimeError(ret);
  hipLaunchKernelGGL(( _cuda_unsorted_segment_sum), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.shape(0), a.size() / a.shape(0), ids.data(), num, result.data());
  ret = hipGetLastError();
  if (ret != hipSuccess) throw CUDARuntimeError(ret);
  return result;
}

// Fallback overload for all other element types: not supported.
template <class T, class ST>
static inline Variable<T> _unsorted_segment_sum_helper(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num,
    ST blockCount, ST threadCount) {
  throw NotImplementedError();
}

template <class T>
CUDASegmentInterface<T>::CUDASegmentInterface(const CUDADeviceProperty& property)
    : _property(property) {
}

template <class T>
CUDASegmentInterface<T>::~CUDASegmentInterface() {
}

// Public entry point: choose a launch configuration suited to a.size()
// and dispatch to the type-appropriate helper above.
template <class T>
Variable<T> CUDASegmentInterface<T>::unsortedSegmentSum(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num) {
  size_t blockCount, threadCount;
  std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
  return _unsorted_segment_sum_helper(a, ids, num, blockCount, threadCount);
}

// Explicitly instantiate the interface for every supported element type.
#define DEFINE_FUNC(type) template class CUDASegmentInterface<type>;

TFCC_FOR_ALL_TYPES(DEFINE_FUNC);

}  // namespace tfcc
7361c4a7a5f25c61f6a397ec0710df1ba075e302.cu
#include "tfcc_cudasegmentinterface.h"

#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"

namespace tfcc {

// Unsorted segment sum over the leading axis: for each of `batch` rows of
// `k` elements in `a`, add the row element-wise into output row b[ids[row]].
// Rows whose id is negative are skipped. Uses a grid-stride loop over all
// batch*k elements; atomicAdd is required because several input rows may
// map to the same output segment.
// NOTE(review): the `sum` parameter (number of output segments) is unused
// inside the kernel — out-of-range ids are not bounds-checked here.
template <class T>
static __global__ void _cuda_unsorted_segment_sum(
    const T* a, unsigned batch, unsigned k, const int* ids, unsigned sum, T* b) {
  const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
  const unsigned skip = blockDim.x * gridDim.x;
  const unsigned total = batch * k;
  for (unsigned i = tid; i < total; i += skip) {
    unsigned ps1 = i / k;  // source row index
    unsigned ps2 = i % k;  // column within the row
    int idx = ids[ps1];
    if (idx < 0) {
      continue;  // negative ids mean "drop this row"
    }
    atomicAdd(b + idx * k + ps2, a[i]);
  }
}

// helper functions

// Overload enabled only for element types this kernel supports via
// atomicAdd (float / int32 / uint32): zero-fill the output asynchronously
// on the session's stream, then launch the kernel on the same stream.
template <class T>
static inline typename std::enable_if<
    std::is_same<float, T>::value || std::is_same<int32_t, T>::value ||
        std::is_same<uint32_t, T>::value,
    Variable<T>>::type
_unsorted_segment_sum_helper(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num,
    size_t blockCount, size_t threadCount) {
  std::vector<unsigned> s = a.shape().toVector();
  s[0] = num;  // output keeps a's shape but has `num` segments on axis 0
  Variable<T> result(std::move(s));
  tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
  cudaError_t ret = cudaMemsetAsync(result.data(), 0, result.size() * sizeof(T), session->getImpl()->cudaStream());
  if (ret != cudaSuccess) throw CUDARuntimeError(ret);
  _cuda_unsorted_segment_sum<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
      a.data(), a.shape(0), a.size() / a.shape(0), ids.data(), num, result.data());
  ret = cudaGetLastError();
  if (ret != cudaSuccess) throw CUDARuntimeError(ret);
  return result;
}

// Fallback overload for all other element types: not supported.
template <class T, class ST>
static inline Variable<T> _unsorted_segment_sum_helper(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num,
    ST blockCount, ST threadCount) {
  throw NotImplementedError();
}

template <class T>
CUDASegmentInterface<T>::CUDASegmentInterface(const CUDADeviceProperty& property)
    : _property(property) {
}

template <class T>
CUDASegmentInterface<T>::~CUDASegmentInterface() {
}

// Public entry point: choose a launch configuration suited to a.size()
// and dispatch to the type-appropriate helper above.
template <class T>
Variable<T> CUDASegmentInterface<T>::unsortedSegmentSum(
    const Tensor<T>& a, const Tensor<int>& ids, unsigned num) {
  size_t blockCount, threadCount;
  std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
  return _unsorted_segment_sum_helper(a, ids, num, blockCount, threadCount);
}

// Explicitly instantiate the interface for every supported element type.
#define DEFINE_FUNC(type) template class CUDASegmentInterface<type>;

TFCC_FOR_ALL_TYPES(DEFINE_FUNC);

}  // namespace tfcc
944ffddd2288a84b1226c3dd5a49bbd1fa8312c3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/auc_kernel.h"

namespace phi {

using paddle::platform::PADDLE_CUDA_NUM_THREADS;

// Buffer layout used by all kernels below (when slide_steps > 0):
// `pos`/`neg` hold slide_steps per-step histogram blocks of
// `bucket_length` entries, followed by one running-sum block at offset
// slide_steps * bucket_length, followed by a single step counter at
// offset (slide_steps + 1) * bucket_length.

// Retire the oldest step's counts before the new batch is accumulated:
// subtract the current step's block from the running-sum block, then
// zero it so it can be refilled.
__global__ void ClearObsoleteDataKernel(int64_t *pos,
                                        int64_t *neg,
                                        const int bucket_length,
                                        const int slide_steps) {
  // step counter modulo slide_steps selects the block to recycle
  int cur_step_index =
      static_cast<int>(pos[(slide_steps + 1) * bucket_length]) % slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  CUDA_KERNEL_LOOP(i, bucket_length) {
    pos[sum_step_begin + i] -= pos[cur_step_begin + i];
    neg[sum_step_begin + i] -= neg[cur_step_begin + i];
    pos[cur_step_begin + i] = neg[cur_step_begin + i] = 0;
  }
}

// Fold the just-filled current step's counts into the running-sum block.
__global__ void UpdateSumDataKernel(int64_t *pos,
                                    int64_t *neg,
                                    const int bucket_length,
                                    const int slide_steps) {
  int cur_step_index =
      static_cast<int>(pos[(slide_steps + 1) * bucket_length]) % slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  CUDA_KERNEL_LOOP(i, bucket_length) {
    pos[sum_step_begin + i] += pos[cur_step_begin + i];
    neg[sum_step_begin + i] += neg[cur_step_begin + i];
  }
}

// Histogram one batch of predictions: each sample's last-column score in
// [0, 1] is bucketed into num_thresholds+1 bins, incrementing `pos` for
// positive labels and `neg` for negative ones. With slide_steps == 0 the
// counts go into block 0 (cur_step_begin stays 0); otherwise into the
// current step's block.
template <typename T>
__global__ void AddDataKernel(const int64_t *label_data,
                              const T *pred_data,
                              const int inference_width,
                              const int num_thresholds,
                              int64_t *pos,
                              int64_t *neg,
                              const int numel,
                              const int slide_steps) {
  int cur_step_begin = 0;
  if (slide_steps > 0) {
    int cur_step_index =
        static_cast<int>(pos[(slide_steps + 1) * (1 + num_thresholds)]) %
        slide_steps;
    cur_step_begin = cur_step_index * (1 + num_thresholds);
  }
  CUDA_KERNEL_LOOP(i, numel) {
    // score is the last column of the prediction row
    auto predict_data = pred_data[i * inference_width + (inference_width - 1)];
    PADDLE_ENFORCE(predict_data <= 1, "The predict data must less or equal 1.");
    PADDLE_ENFORCE(predict_data >= 0,
                   "The predict data must gather or equal 0.");
    uint32_t binIdx = static_cast<uint32_t>(predict_data * num_thresholds);
    if (label_data[i]) {
      paddle::platform::CudaAtomicAdd(pos + cur_step_begin + binIdx, 1);
    } else {
      paddle::platform::CudaAtomicAdd(neg + cur_step_begin + binIdx, 1);
    }
  }
}

// Single-thread kernel: integrate the ROC curve by the trapezoid rule,
// walking thresholds from high to low, then normalize by total
// positives * negatives. When need_add_batch_num is set (sliding-window
// mode), also advance the step counters stored one past the sum block.
__global__ void CalcAucKernel(int64_t *stat_pos,
                              int64_t *stat_neg,
                              int num_thresholds,
                              double *auc,
                              bool need_add_batch_num) {
  *auc = 0.0f;
  double totPos = 0.0;
  double totNeg = 0.0;
  double totPosPrev = 0.0;
  double totNegPrev = 0.0;

  int idx = num_thresholds;

  while (idx >= 0) {
    totPosPrev = totPos;
    totNegPrev = totNeg;
    totPos += stat_pos[idx];
    totNeg += stat_neg[idx];
    *auc += (totNeg - totNegPrev) * (totPos + totPosPrev) / 2.0;
    --idx;
  }

  if (totPos > 0.0 && totNeg > 0.0) {
    *auc = *auc / totPos / totNeg;
  }
  if (need_add_batch_num) {
    stat_pos[num_thresholds + 1] += 1;
    stat_neg[num_thresholds + 1] += 1;
  }
}

// Unsigned trapezoid area between two curve points. (Currently unused in
// this file; kept for parity with the CPU implementation.)
inline static double trapezoidArea(double X1, double X2, double Y1, double Y2) {
  return (X1 > X2 ? (X1 - X2) : (X2 - X1)) * (Y1 + Y2) / 2.0;
}

// Accumulate one batch into the positive/negative histograms. With
// slide_steps == 0 this is a plain global accumulation; otherwise it
// performs the sliding-window sequence: clear the recycled step block,
// add the batch into it, then fold it into the running sum.
// NOTE(review): cur_step_index below reads origin_stat_pos on the host;
// this presumes the stat buffer is host-accessible — confirm against the
// allocator used by callers.
template <typename T, typename Context>
void statAuc(const Context &dev_ctx,
             const DenseTensor &label,
             const DenseTensor &predict,
             const int num_thresholds,
             const int slide_steps,
             int64_t *origin_stat_pos,
             int64_t *origin_stat_neg) {
  size_t batch_size = predict.dims()[0];
  size_t inference_width = predict.dims()[1];
  const T *inference_data = predict.data<T>();
  const auto *label_data = label.data<int64_t>();
  const int bucket_length = num_thresholds + 1;
  if (slide_steps == 0) {
    hipLaunchKernelGGL(( AddDataKernel), (batch_size + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, dev_ctx.stream(), label_data, inference_data, inference_width, num_thresholds, origin_stat_pos, origin_stat_neg, batch_size, slide_steps);
    return;
  }
  // the last number of origin_stat_pos store the index should be used in
  // current step
  int cur_step_index =
      static_cast<int>(origin_stat_pos[(slide_steps + 1) * bucket_length]) %
      slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  hipLaunchKernelGGL(( ClearObsoleteDataKernel), (bucket_length + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, dev_ctx.stream(), origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
  hipLaunchKernelGGL(( AddDataKernel), (batch_size + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, dev_ctx.stream(), label_data, inference_data, inference_width, num_thresholds, origin_stat_pos, origin_stat_neg, batch_size, slide_steps);
  hipLaunchKernelGGL(( UpdateSumDataKernel), (bucket_length + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, dev_ctx.stream(), origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
}

// GPU AUC op entry point: copy the incoming stat tensors into the output
// stat buffers if they are distinct, accumulate this batch's statistics,
// then compute the AUC from the running-sum block.
template <typename T, typename Context>
void AucKernel(const Context &dev_ctx,
               const DenseTensor &input,
               const DenseTensor &label,
               const DenseTensor &stat_pos,
               const DenseTensor &stat_neg,
               const std::string &curve,
               int num_thresholds,
               int slide_steps,
               DenseTensor *auc,
               DenseTensor *stat_pos_out,
               DenseTensor *stat_neg_out) {
  // Only use output var for now, make sure it's persistable and
  // not cleaned up for each batch.
  auto *origin_stat_pos = dev_ctx.template Alloc<int64_t>(stat_pos_out);
  auto *origin_stat_neg = dev_ctx.template Alloc<int64_t>(stat_neg_out);
  auto *auc_value = dev_ctx.template Alloc<double>(auc);

  auto *stat_pos_in_tensor = &stat_pos;
  auto *stat_neg_in_tensor = &stat_neg;
  auto *pos_in_data = stat_pos.data<int64_t>();
  auto *neg_in_data = stat_neg.data<int64_t>();
#ifdef PADDLE_WITH_CUDA
  if (stat_pos_in_tensor != stat_pos_out) {
    hipMemcpy(
        origin_stat_pos,
        pos_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
  if (stat_neg_in_tensor != stat_neg_out) {
    hipMemcpy(
        origin_stat_neg,
        neg_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
#else
  if (stat_pos_in_tensor != stat_pos_out) {
    hipMemcpy(
        origin_stat_pos,
        pos_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
  if (stat_neg_in_tensor != stat_neg_out) {
    hipMemcpy(
        origin_stat_neg,
        neg_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
#endif

  statAuc<T, Context>(dev_ctx,
                      label,
                      input,
                      num_thresholds,
                      slide_steps,
                      origin_stat_pos,
                      origin_stat_neg);
  int sum_offset = slide_steps * (num_thresholds + 1);
  // single-thread launch: the trapezoid integration is sequential
  hipLaunchKernelGGL(( CalcAucKernel), dim3(1), dim3(1), 0, dev_ctx.stream(), origin_stat_pos + sum_offset, origin_stat_neg + sum_offset, num_thresholds, auc_value, slide_steps > 0);
}

}  // namespace phi

PD_REGISTER_KERNEL(auc, GPU, ALL_LAYOUT, phi::AucKernel, float) {}
944ffddd2288a84b1226c3dd5a49bbd1fa8312c3.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/auc_kernel.h"

namespace phi {

using paddle::platform::PADDLE_CUDA_NUM_THREADS;

// Buffer layout used by all kernels below (when slide_steps > 0):
// `pos`/`neg` hold slide_steps per-step histogram blocks of
// `bucket_length` entries, followed by one running-sum block at offset
// slide_steps * bucket_length, followed by a single step counter at
// offset (slide_steps + 1) * bucket_length.

// Retire the oldest step's counts before the new batch is accumulated:
// subtract the current step's block from the running-sum block, then
// zero it so it can be refilled.
__global__ void ClearObsoleteDataKernel(int64_t *pos,
                                        int64_t *neg,
                                        const int bucket_length,
                                        const int slide_steps) {
  // step counter modulo slide_steps selects the block to recycle
  int cur_step_index =
      static_cast<int>(pos[(slide_steps + 1) * bucket_length]) % slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  CUDA_KERNEL_LOOP(i, bucket_length) {
    pos[sum_step_begin + i] -= pos[cur_step_begin + i];
    neg[sum_step_begin + i] -= neg[cur_step_begin + i];
    pos[cur_step_begin + i] = neg[cur_step_begin + i] = 0;
  }
}

// Fold the just-filled current step's counts into the running-sum block.
__global__ void UpdateSumDataKernel(int64_t *pos,
                                    int64_t *neg,
                                    const int bucket_length,
                                    const int slide_steps) {
  int cur_step_index =
      static_cast<int>(pos[(slide_steps + 1) * bucket_length]) % slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  CUDA_KERNEL_LOOP(i, bucket_length) {
    pos[sum_step_begin + i] += pos[cur_step_begin + i];
    neg[sum_step_begin + i] += neg[cur_step_begin + i];
  }
}

// Histogram one batch of predictions: each sample's last-column score in
// [0, 1] is bucketed into num_thresholds+1 bins, incrementing `pos` for
// positive labels and `neg` for negative ones. With slide_steps == 0 the
// counts go into block 0 (cur_step_begin stays 0); otherwise into the
// current step's block.
template <typename T>
__global__ void AddDataKernel(const int64_t *label_data,
                              const T *pred_data,
                              const int inference_width,
                              const int num_thresholds,
                              int64_t *pos,
                              int64_t *neg,
                              const int numel,
                              const int slide_steps) {
  int cur_step_begin = 0;
  if (slide_steps > 0) {
    int cur_step_index =
        static_cast<int>(pos[(slide_steps + 1) * (1 + num_thresholds)]) %
        slide_steps;
    cur_step_begin = cur_step_index * (1 + num_thresholds);
  }
  CUDA_KERNEL_LOOP(i, numel) {
    // score is the last column of the prediction row
    auto predict_data = pred_data[i * inference_width + (inference_width - 1)];
    PADDLE_ENFORCE(predict_data <= 1, "The predict data must less or equal 1.");
    PADDLE_ENFORCE(predict_data >= 0,
                   "The predict data must gather or equal 0.");
    uint32_t binIdx = static_cast<uint32_t>(predict_data * num_thresholds);
    if (label_data[i]) {
      paddle::platform::CudaAtomicAdd(pos + cur_step_begin + binIdx, 1);
    } else {
      paddle::platform::CudaAtomicAdd(neg + cur_step_begin + binIdx, 1);
    }
  }
}

// Single-thread kernel: integrate the ROC curve by the trapezoid rule,
// walking thresholds from high to low, then normalize by total
// positives * negatives. When need_add_batch_num is set (sliding-window
// mode), also advance the step counters stored one past the sum block.
__global__ void CalcAucKernel(int64_t *stat_pos,
                              int64_t *stat_neg,
                              int num_thresholds,
                              double *auc,
                              bool need_add_batch_num) {
  *auc = 0.0f;
  double totPos = 0.0;
  double totNeg = 0.0;
  double totPosPrev = 0.0;
  double totNegPrev = 0.0;

  int idx = num_thresholds;

  while (idx >= 0) {
    totPosPrev = totPos;
    totNegPrev = totNeg;
    totPos += stat_pos[idx];
    totNeg += stat_neg[idx];
    *auc += (totNeg - totNegPrev) * (totPos + totPosPrev) / 2.0;
    --idx;
  }

  if (totPos > 0.0 && totNeg > 0.0) {
    *auc = *auc / totPos / totNeg;
  }
  if (need_add_batch_num) {
    stat_pos[num_thresholds + 1] += 1;
    stat_neg[num_thresholds + 1] += 1;
  }
}

// Unsigned trapezoid area between two curve points. (Currently unused in
// this file; kept for parity with the CPU implementation.)
inline static double trapezoidArea(double X1, double X2, double Y1, double Y2) {
  return (X1 > X2 ? (X1 - X2) : (X2 - X1)) * (Y1 + Y2) / 2.0;
}

// Accumulate one batch into the positive/negative histograms. With
// slide_steps == 0 this is a plain global accumulation; otherwise it
// performs the sliding-window sequence: clear the recycled step block,
// add the batch into it, then fold it into the running sum.
// NOTE(review): cur_step_index below reads origin_stat_pos on the host;
// this presumes the stat buffer is host-accessible — confirm against the
// allocator used by callers.
template <typename T, typename Context>
void statAuc(const Context &dev_ctx,
             const DenseTensor &label,
             const DenseTensor &predict,
             const int num_thresholds,
             const int slide_steps,
             int64_t *origin_stat_pos,
             int64_t *origin_stat_neg) {
  size_t batch_size = predict.dims()[0];
  size_t inference_width = predict.dims()[1];
  const T *inference_data = predict.data<T>();
  const auto *label_data = label.data<int64_t>();
  const int bucket_length = num_thresholds + 1;
  if (slide_steps == 0) {
    AddDataKernel<<<(batch_size + PADDLE_CUDA_NUM_THREADS - 1) /
                        PADDLE_CUDA_NUM_THREADS,
                    PADDLE_CUDA_NUM_THREADS,
                    0,
                    dev_ctx.stream()>>>(label_data,
                                        inference_data,
                                        inference_width,
                                        num_thresholds,
                                        origin_stat_pos,
                                        origin_stat_neg,
                                        batch_size,
                                        slide_steps);
    return;
  }
  // the last number of origin_stat_pos store the index should be used in
  // current step
  int cur_step_index =
      static_cast<int>(origin_stat_pos[(slide_steps + 1) * bucket_length]) %
      slide_steps;
  int cur_step_begin = cur_step_index * bucket_length;
  int sum_step_begin = slide_steps * bucket_length;
  ClearObsoleteDataKernel<<<(bucket_length + PADDLE_CUDA_NUM_THREADS - 1) /
                                PADDLE_CUDA_NUM_THREADS,
                            PADDLE_CUDA_NUM_THREADS,
                            0,
                            dev_ctx.stream()>>>(
      origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
  AddDataKernel<<<(batch_size + PADDLE_CUDA_NUM_THREADS - 1) /
                      PADDLE_CUDA_NUM_THREADS,
                  PADDLE_CUDA_NUM_THREADS,
                  0,
                  dev_ctx.stream()>>>(label_data,
                                      inference_data,
                                      inference_width,
                                      num_thresholds,
                                      origin_stat_pos,
                                      origin_stat_neg,
                                      batch_size,
                                      slide_steps);
  UpdateSumDataKernel<<<(bucket_length + PADDLE_CUDA_NUM_THREADS - 1) /
                            PADDLE_CUDA_NUM_THREADS,
                        PADDLE_CUDA_NUM_THREADS,
                        0,
                        dev_ctx.stream()>>>(
      origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
}

// GPU AUC op entry point: copy the incoming stat tensors into the output
// stat buffers if they are distinct, accumulate this batch's statistics,
// then compute the AUC from the running-sum block.
template <typename T, typename Context>
void AucKernel(const Context &dev_ctx,
               const DenseTensor &input,
               const DenseTensor &label,
               const DenseTensor &stat_pos,
               const DenseTensor &stat_neg,
               const std::string &curve,
               int num_thresholds,
               int slide_steps,
               DenseTensor *auc,
               DenseTensor *stat_pos_out,
               DenseTensor *stat_neg_out) {
  // Only use output var for now, make sure it's persistable and
  // not cleaned up for each batch.
  auto *origin_stat_pos = dev_ctx.template Alloc<int64_t>(stat_pos_out);
  auto *origin_stat_neg = dev_ctx.template Alloc<int64_t>(stat_neg_out);
  auto *auc_value = dev_ctx.template Alloc<double>(auc);

  auto *stat_pos_in_tensor = &stat_pos;
  auto *stat_neg_in_tensor = &stat_neg;
  auto *pos_in_data = stat_pos.data<int64_t>();
  auto *neg_in_data = stat_neg.data<int64_t>();
#ifdef PADDLE_WITH_CUDA
  if (stat_pos_in_tensor != stat_pos_out) {
    cudaMemcpy(
        origin_stat_pos,
        pos_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        cudaMemcpyDeviceToDevice);
  }
  if (stat_neg_in_tensor != stat_neg_out) {
    cudaMemcpy(
        origin_stat_neg,
        neg_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        cudaMemcpyDeviceToDevice);
  }
#else
  // ROCm build path uses the HIP runtime equivalents.
  if (stat_pos_in_tensor != stat_pos_out) {
    hipMemcpy(
        origin_stat_pos,
        pos_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
  if (stat_neg_in_tensor != stat_neg_out) {
    hipMemcpy(
        origin_stat_neg,
        neg_in_data,
        ((1 + slide_steps) * (num_thresholds + 1) + (slide_steps > 0 ? 1 : 0)) *
            sizeof(int64_t),
        hipMemcpyDeviceToDevice);
  }
#endif

  statAuc<T, Context>(dev_ctx,
                      label,
                      input,
                      num_thresholds,
                      slide_steps,
                      origin_stat_pos,
                      origin_stat_neg);
  int sum_offset = slide_steps * (num_thresholds + 1);
  // single-thread launch: the trapezoid integration is sequential
  CalcAucKernel<<<1, 1, 0, dev_ctx.stream()>>>(origin_stat_pos + sum_offset,
                                               origin_stat_neg + sum_offset,
                                               num_thresholds,
                                               auc_value,
                                               slide_steps > 0);
}

}  // namespace phi

PD_REGISTER_KERNEL(auc, GPU, ALL_LAYOUT, phi::AucKernel, float) {}
9e42f906f0b61af728ad5fbed4763358346bd176.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
// This file is auto-generated. See "generate_kernels.py"
//
// Each kernel below is one compute-capability-specific instantiation of
// the cutlass memory-efficient-attention backward pass (f32, aligned,
// 64x64 tile, k=64). The nested __CUDA_ARCH__ guards make each body a
// no-op-with-error-print when compiled for an architecture outside the
// kernel's [min, max) SM range.
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>

// sm50 <= arch < sm70 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm70 <= arch < sm75 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm75 <= arch < sm80 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm80 <= arch < sm100 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
9e42f906f0b61af728ad5fbed4763358346bd176.cu
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
// This file is auto-generated. See "generate_kernels.py"
//
// Each kernel below is one compute-capability-specific instantiation of
// the cutlass memory-efficient-attention backward pass (f32, aligned,
// 64x64 tile, k=64). The nested __CUDA_ARCH__ guards make each body a
// no-op-with-error-print when compiled for an architecture outside the
// kernel's [min, max) SM range.
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>

// sm50 <= arch < sm70 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm70 <= arch < sm75 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm75 <= arch < sm80 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// sm80 <= arch < sm100 variant.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 64>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
a84b92be3631e31faa76642c2a61224bb2810f3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void square_array(float *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx] * a[idx]; }
a84b92be3631e31faa76642c2a61224bb2810f3a.cu
#include "includes.h" __global__ void square_array(float *a, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<N) a[idx] = a[idx] * a[idx]; }
eb0ce0c17acf9dccb81e4db1e22e448d77896173.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Jim Samson // 04 April 2019 // Cuda Minimum Finding // Homework Part 2 // #include <stdio.h> #include <limits.h> #define HIGHEST_VALUE 8000000 #define THREADS 8 __global__ void findLowest(int numMin, int *array_val, int *cudaResult ) { int low = threadIdx.x * numMin; int high = low + numMin -1; int min = array_val[low]; for (unsigned int i = low; i < high; i++){ if(array_val[i] < min){ min = array_val[i]; } } cudaResult[threadIdx.x] = min; printf("Thread %d returned: %d \n", threadIdx.x, min); } int main() { int *array_val; int *cudaResult; int min = INT_MAX; int testMin = INT_MAX; int *cuda_return; int *dev_a; array_val = (int *) malloc(sizeof(int)*HIGHEST_VALUE); cudaResult = (int *) malloc(sizeof(int)*THREADS); for(unsigned int i = 0; i < HIGHEST_VALUE; i++) { array_val[i] = rand() % 100000; if (testMin > array_val[i]){ testMin = array_val[i]; } } printf("Minimum value is: %d \n", testMin); int numMin = HIGHEST_VALUE / THREADS; hipMalloc((void**)&cuda_return, HIGHEST_VALUE*sizeof(int)); hipMalloc((void**)&dev_a, HIGHEST_VALUE*sizeof(int)); hipMemcpy(dev_a, array_val, HIGHEST_VALUE*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_return, cudaResult, THREADS*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( findLowest), dim3(1),dim3(8), 0, 0, numMin, dev_a, cuda_return); hipMemcpy(cudaResult, cuda_return, THREADS*sizeof(int), hipMemcpyDeviceToHost); for(unsigned int i = 0; i < THREADS; i++) { if(min > cudaResult[i]) { min = cudaResult[i]; } } hipFree(cuda_return); hipFree(dev_a); printf("The Cuda Value is %d \n", min); }
eb0ce0c17acf9dccb81e4db1e22e448d77896173.cu
// Jim Samson // 04 April 2019 // Cuda Minimum Finding // Homework Part 2 // #include <stdio.h> #include <limits.h> #define HIGHEST_VALUE 8000000 #define THREADS 8 __global__ void findLowest(int numMin, int *array_val, int *cudaResult ) { int low = threadIdx.x * numMin; int high = low + numMin -1; int min = array_val[low]; for (unsigned int i = low; i < high; i++){ if(array_val[i] < min){ min = array_val[i]; } } cudaResult[threadIdx.x] = min; printf("Thread %d returned: %d \n", threadIdx.x, min); } int main() { int *array_val; int *cudaResult; int min = INT_MAX; int testMin = INT_MAX; int *cuda_return; int *dev_a; array_val = (int *) malloc(sizeof(int)*HIGHEST_VALUE); cudaResult = (int *) malloc(sizeof(int)*THREADS); for(unsigned int i = 0; i < HIGHEST_VALUE; i++) { array_val[i] = rand() % 100000; if (testMin > array_val[i]){ testMin = array_val[i]; } } printf("Minimum value is: %d \n", testMin); int numMin = HIGHEST_VALUE / THREADS; cudaMalloc((void**)&cuda_return, HIGHEST_VALUE*sizeof(int)); cudaMalloc((void**)&dev_a, HIGHEST_VALUE*sizeof(int)); cudaMemcpy(dev_a, array_val, HIGHEST_VALUE*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_return, cudaResult, THREADS*sizeof(int), cudaMemcpyHostToDevice); findLowest<<<1,8>>>(numMin, dev_a, cuda_return); cudaMemcpy(cudaResult, cuda_return, THREADS*sizeof(int), cudaMemcpyDeviceToHost); for(unsigned int i = 0; i < THREADS; i++) { if(min > cudaResult[i]) { min = cudaResult[i]; } } cudaFree(cuda_return); cudaFree(dev_a); printf("The Cuda Value is %d \n", min); }
c1d4ee3e6e3122bd599ee1ffb63199b722fdb17e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { __global__ void computeVmapKernel (const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u < depth.cols && v < depth.rows) { float z = depth.ptr (v)[u] / 1000.f; // load and convert: mm -> meters if (z != 0) { float vx = z * (u - cx) * fx_inv; float vy = z * (v - cy) * fy_inv; float vz = z; vmap.ptr (v )[u] = vx; vmap.ptr (v + depth.rows )[u] = vy; vmap.ptr (v + depth.rows * 2)[u] = vz; } else vmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); } } __global__ void computeNmapKernel (int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= cols || v >= rows) return; if (u == cols - 1 || v == rows - 1) { nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); return; } float3 v00, v01, v10; v00.x = vmap.ptr (v )[u]; v01.x = vmap.ptr (v )[u + 1]; v10.x = vmap.ptr (v + 1)[u]; if (!isnan (v00.x) && !isnan (v01.x) && !isnan (v10.x)) { v00.y = vmap.ptr (v + rows)[u]; v01.y = vmap.ptr (v + rows)[u + 1]; v10.y = vmap.ptr (v + 1 + rows)[u]; v00.z = vmap.ptr (v + 2 * rows)[u]; v01.z = vmap.ptr (v + 2 * rows)[u + 1]; v10.z = vmap.ptr (v + 1 + 2 * rows)[u]; float3 r = normalized (cross (v01 - v00, v10 - v00)); nmap.ptr (v )[u] = r.x; nmap.ptr (v + rows)[u] = r.y; nmap.ptr (v + 2 * rows)[u] = r.z; } else nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); } } } } namespace pcl { namespace device { namespace kinfuLS { ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void createVMap (const Intr& intr, const DepthMap& depth, MapArr& vmap) { vmap.create (depth.rows () * 3, depth.cols ()); dim3 block (32, 8); dim3 grid (1, 
1, 1); grid.x = divUp (depth.cols (), block.x); grid.y = divUp (depth.rows (), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; hipLaunchKernelGGL(( computeVmapKernel), dim3(grid), dim3(block), 0, 0, depth, vmap, 1.f / fx, 1.f / fy, cx, cy); cudaSafeCall (hipGetLastError ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void createNMap (const MapArr& vmap, MapArr& nmap) { nmap.create (vmap.rows (), vmap.cols ()); int rows = vmap.rows () / 3; int cols = vmap.cols (); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); hipLaunchKernelGGL(( computeNmapKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap, nmap); cudaSafeCall (hipGetLastError ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void transformMapsKernel (int rows, int cols, const PtrStep<float> vmap_src, const PtrStep<float> nmap_src, const Mat33 Rmat, const float3 tvec, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const float qnan = numeric_limits<float>::quiet_NaN (); if (x < cols && y < rows) { //vetexes float3 vsrc, vdst = make_float3 (qnan, qnan, qnan); vsrc.x = vmap_src.ptr (y)[x]; if (!isnan (vsrc.x)) { vsrc.y = vmap_src.ptr (y + rows)[x]; vsrc.z = vmap_src.ptr (y + 2 * rows)[x]; vdst = Rmat * vsrc + tvec; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; } vmap_dst.ptr (y)[x] = vdst.x; //normals float3 nsrc, ndst = make_float3 (qnan, qnan, qnan); nsrc.x = nmap_src.ptr (y)[x]; if (!isnan (nsrc.x)) { nsrc.y = nmap_src.ptr (y + rows)[x]; nsrc.z = nmap_src.ptr (y + 2 * rows)[x]; ndst = Rmat * nsrc; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } nmap_dst.ptr (y)[x] = ndst.x; } } 
////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void transformMaps (const MapArr& vmap_src, const MapArr& nmap_src, const Mat33& Rmat, const float3& tvec, MapArr& vmap_dst, MapArr& nmap_dst) { int cols = vmap_src.cols (); int rows = vmap_src.rows () / 3; vmap_dst.create (rows * 3, cols); nmap_dst.create (rows * 3, cols); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); hipLaunchKernelGGL(( transformMapsKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap_src, nmap_src, Rmat, tvec, vmap_dst, nmap_dst); cudaSafeCall (hipGetLastError ()); cudaSafeCall (hipDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { template<bool normalize> __global__ void resizeMapKernel (int drows, int dcols, int srows, const PtrStep<float> input, PtrStep<float> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dcols || y >= drows) return; const float qnan = numeric_limits<float>::quiet_NaN (); int xs = x * 2; int ys = y * 2; float x00 = input.ptr (ys + 0)[xs + 0]; float x01 = input.ptr (ys + 0)[xs + 1]; float x10 = input.ptr (ys + 1)[xs + 0]; float x11 = input.ptr (ys + 1)[xs + 1]; if (isnan (x00) || isnan (x01) || isnan (x10) || isnan (x11)) { output.ptr (y)[x] = qnan; return; } else { float3 n; n.x = (x00 + x01 + x10 + x11) / 4; float y00 = input.ptr (ys + srows + 0)[xs + 0]; float y01 = input.ptr (ys + srows + 0)[xs + 1]; float y10 = input.ptr (ys + srows + 1)[xs + 0]; float y11 = input.ptr (ys + srows + 1)[xs + 1]; n.y = (y00 + y01 + y10 + y11) / 4; float z00 = input.ptr (ys + 2 * srows + 0)[xs + 0]; float z01 = input.ptr (ys + 2 * srows + 0)[xs + 1]; float z10 = input.ptr (ys + 2 * srows + 1)[xs + 0]; float z11 = input.ptr (ys + 2 * srows + 1)[xs + 1]; n.z = (z00 + z01 + z10 + z11) / 4; if (normalize) n = normalized (n); output.ptr (y )[x] = n.x; output.ptr (y + 
drows)[x] = n.y; output.ptr (y + 2 * drows)[x] = n.z; } } template<bool normalize> void resizeMap (const MapArr& input, MapArr& output) { int in_cols = input.cols (); int in_rows = input.rows () / 3; int out_cols = in_cols / 2; int out_rows = in_rows / 2; output.create (out_rows * 3, out_cols); dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resizeMapKernel<normalize><< < grid, block>>>(out_rows, out_cols, in_rows, input, output); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void resizeVMap (const MapArr& input, MapArr& output) { resizeMap<false>(input, output); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void resizeNMap (const MapArr& input, MapArr& output) { resizeMap<true>(input, output); } } } } namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void convertMapKernel (int rows, int cols, const PtrStep<float> map, PtrStep<T> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= cols || y >= rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); T t; t.x = map.ptr (y)[x]; if (!isnan (t.x)) { t.y = map.ptr (y + rows)[x]; t.z = map.ptr (y + 2 * rows)[x]; } else t.y = t.z = qnan; output.ptr (y)[x] = t; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> void convert (const MapArr& vmap, DeviceArray2D<T>& output) { int cols = vmap.cols (); int rows = vmap.rows () / 3; output.create (rows, cols); dim3 block (32, 8); dim3 grid (divUp (cols, block.x), divUp (rows, block.y)); convertMapKernel<T><< < grid, block>>>(rows, cols, vmap, output); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } 
template void convert (const MapArr& vmap, DeviceArray2D<float4>& output); template void convert (const MapArr& vmap, DeviceArray2D<float8>& output); } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void mergePointNormalKernel (const float4* cloud, const float8* normals, PtrSz<float12> output) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < output.size) { float4 p = cloud[idx]; float8 n = normals[idx]; float12 o; o.x = p.x; o.y = p.y; o.z = p.z; o.normal_x = n.x; o.normal_y = n.y; o.normal_z = n.z; output.data[idx] = o; } } void mergePointNormal (const DeviceArray<float4>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output) { const int block = 256; int total = (int)output.size (); hipLaunchKernelGGL(( mergePointNormalKernel), dim3(divUp (total, block)), dim3(block), 0, 0, cloud, normals, output); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } }
c1d4ee3e6e3122bd599ee1ffb63199b722fdb17e.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { __global__ void computeVmapKernel (const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u < depth.cols && v < depth.rows) { float z = depth.ptr (v)[u] / 1000.f; // load and convert: mm -> meters if (z != 0) { float vx = z * (u - cx) * fx_inv; float vy = z * (v - cy) * fy_inv; float vz = z; vmap.ptr (v )[u] = vx; vmap.ptr (v + depth.rows )[u] = vy; vmap.ptr (v + depth.rows * 2)[u] = vz; } else vmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); } } __global__ void computeNmapKernel (int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= cols || v >= rows) return; if (u == cols - 1 || v == rows - 1) { nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); return; } float3 v00, v01, v10; v00.x = vmap.ptr (v )[u]; v01.x = vmap.ptr (v )[u + 1]; v10.x = vmap.ptr (v + 1)[u]; if (!isnan (v00.x) && !isnan (v01.x) && !isnan (v10.x)) { v00.y = vmap.ptr (v + rows)[u]; v01.y = vmap.ptr (v + rows)[u + 1]; v10.y = vmap.ptr (v + 1 + rows)[u]; v00.z = vmap.ptr (v + 2 * rows)[u]; v01.z = vmap.ptr (v + 2 * rows)[u + 1]; v10.z = vmap.ptr (v + 1 + 2 * rows)[u]; float3 r = normalized (cross (v01 - v00, v10 - v00)); nmap.ptr (v )[u] = r.x; nmap.ptr (v + rows)[u] = r.y; nmap.ptr (v + 2 * rows)[u] = r.z; } else nmap.ptr (v)[u] = numeric_limits<float>::quiet_NaN (); } } } } namespace pcl { namespace device { namespace kinfuLS { ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void createVMap (const Intr& intr, const DepthMap& depth, MapArr& vmap) { vmap.create (depth.rows () * 3, depth.cols ()); dim3 block (32, 8); dim3 grid (1, 
1, 1); grid.x = divUp (depth.cols (), block.x); grid.y = divUp (depth.rows (), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; computeVmapKernel<<<grid, block>>>(depth, vmap, 1.f / fx, 1.f / fy, cx, cy); cudaSafeCall (cudaGetLastError ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void createNMap (const MapArr& vmap, MapArr& nmap) { nmap.create (vmap.rows (), vmap.cols ()); int rows = vmap.rows () / 3; int cols = vmap.cols (); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); computeNmapKernel<<<grid, block>>>(rows, cols, vmap, nmap); cudaSafeCall (cudaGetLastError ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void transformMapsKernel (int rows, int cols, const PtrStep<float> vmap_src, const PtrStep<float> nmap_src, const Mat33 Rmat, const float3 tvec, PtrStepSz<float> vmap_dst, PtrStep<float> nmap_dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; const float qnan = numeric_limits<float>::quiet_NaN (); if (x < cols && y < rows) { //vetexes float3 vsrc, vdst = make_float3 (qnan, qnan, qnan); vsrc.x = vmap_src.ptr (y)[x]; if (!isnan (vsrc.x)) { vsrc.y = vmap_src.ptr (y + rows)[x]; vsrc.z = vmap_src.ptr (y + 2 * rows)[x]; vdst = Rmat * vsrc + tvec; vmap_dst.ptr (y + rows)[x] = vdst.y; vmap_dst.ptr (y + 2 * rows)[x] = vdst.z; } vmap_dst.ptr (y)[x] = vdst.x; //normals float3 nsrc, ndst = make_float3 (qnan, qnan, qnan); nsrc.x = nmap_src.ptr (y)[x]; if (!isnan (nsrc.x)) { nsrc.y = nmap_src.ptr (y + rows)[x]; nsrc.z = nmap_src.ptr (y + 2 * rows)[x]; ndst = Rmat * nsrc; nmap_dst.ptr (y + rows)[x] = ndst.y; nmap_dst.ptr (y + 2 * rows)[x] = ndst.z; } nmap_dst.ptr (y)[x] = ndst.x; } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void transformMaps (const MapArr& 
vmap_src, const MapArr& nmap_src, const Mat33& Rmat, const float3& tvec, MapArr& vmap_dst, MapArr& nmap_dst) { int cols = vmap_src.cols (); int rows = vmap_src.rows () / 3; vmap_dst.create (rows * 3, cols); nmap_dst.create (rows * 3, cols); dim3 block (32, 8); dim3 grid (1, 1, 1); grid.x = divUp (cols, block.x); grid.y = divUp (rows, block.y); transformMapsKernel<<<grid, block>>>(rows, cols, vmap_src, nmap_src, Rmat, tvec, vmap_dst, nmap_dst); cudaSafeCall (cudaGetLastError ()); cudaSafeCall (cudaDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { template<bool normalize> __global__ void resizeMapKernel (int drows, int dcols, int srows, const PtrStep<float> input, PtrStep<float> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dcols || y >= drows) return; const float qnan = numeric_limits<float>::quiet_NaN (); int xs = x * 2; int ys = y * 2; float x00 = input.ptr (ys + 0)[xs + 0]; float x01 = input.ptr (ys + 0)[xs + 1]; float x10 = input.ptr (ys + 1)[xs + 0]; float x11 = input.ptr (ys + 1)[xs + 1]; if (isnan (x00) || isnan (x01) || isnan (x10) || isnan (x11)) { output.ptr (y)[x] = qnan; return; } else { float3 n; n.x = (x00 + x01 + x10 + x11) / 4; float y00 = input.ptr (ys + srows + 0)[xs + 0]; float y01 = input.ptr (ys + srows + 0)[xs + 1]; float y10 = input.ptr (ys + srows + 1)[xs + 0]; float y11 = input.ptr (ys + srows + 1)[xs + 1]; n.y = (y00 + y01 + y10 + y11) / 4; float z00 = input.ptr (ys + 2 * srows + 0)[xs + 0]; float z01 = input.ptr (ys + 2 * srows + 0)[xs + 1]; float z10 = input.ptr (ys + 2 * srows + 1)[xs + 0]; float z11 = input.ptr (ys + 2 * srows + 1)[xs + 1]; n.z = (z00 + z01 + z10 + z11) / 4; if (normalize) n = normalized (n); output.ptr (y )[x] = n.x; output.ptr (y + drows)[x] = n.y; output.ptr (y + 2 * drows)[x] = n.z; } } template<bool normalize> void resizeMap (const MapArr& input, MapArr& output) { int in_cols = input.cols (); int in_rows = 
input.rows () / 3; int out_cols = in_cols / 2; int out_rows = in_rows / 2; output.create (out_rows * 3, out_cols); dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resizeMapKernel<normalize><< < grid, block>>>(out_rows, out_cols, in_rows, input, output); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void resizeVMap (const MapArr& input, MapArr& output) { resizeMap<false>(input, output); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void resizeNMap (const MapArr& input, MapArr& output) { resizeMap<true>(input, output); } } } } namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void convertMapKernel (int rows, int cols, const PtrStep<float> map, PtrStep<T> output) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= cols || y >= rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); T t; t.x = map.ptr (y)[x]; if (!isnan (t.x)) { t.y = map.ptr (y + rows)[x]; t.z = map.ptr (y + 2 * rows)[x]; } else t.y = t.z = qnan; output.ptr (y)[x] = t; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> void convert (const MapArr& vmap, DeviceArray2D<T>& output) { int cols = vmap.cols (); int rows = vmap.rows () / 3; output.create (rows, cols); dim3 block (32, 8); dim3 grid (divUp (cols, block.x), divUp (rows, block.y)); convertMapKernel<T><< < grid, block>>>(rows, cols, vmap, output); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } template void convert (const MapArr& vmap, DeviceArray2D<float4>& output); template void convert (const MapArr& vmap, DeviceArray2D<float8>& output); } } } namespace pcl { namespace 
device { namespace kinfuLS { __global__ void mergePointNormalKernel (const float4* cloud, const float8* normals, PtrSz<float12> output) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < output.size) { float4 p = cloud[idx]; float8 n = normals[idx]; float12 o; o.x = p.x; o.y = p.y; o.z = p.z; o.normal_x = n.x; o.normal_y = n.y; o.normal_z = n.z; output.data[idx] = o; } } void mergePointNormal (const DeviceArray<float4>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output) { const int block = 256; int total = (int)output.size (); mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } }
f3abdd57408f3854a615e1dc4363dd858f026f64.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math_functions.h>
#include "CMmotion.cu"
#include <stdio.h>

#define FRAME_PERIOD (1 / 60.0)
#define ZERO_NUM (1e-8)
#define A_FACTOR (1.5)
#define OUR_MAX_ACC (450)
#define OUR_MAX_DEC (450)
#define OUR_MAX_VEL (300)
#define THEIR_MAX_ACC (500)
#define THEIR_MAX_DEC (500)
#define THEIR_MAC_VEL (350)
#define PI (3.14159265359)
#define G (9.8)
#define SQRT_2 (1.414)
#define TIME_FOR_OUR (0)
#define TIME_FOR_OUR_BOTH_KEEP (-0.2)
#define TIME_FOR_THEIR_BOTH_KEEP (-0.4)
#define TIME_FOR_THEIR (-0.6)
#define TIME_FOR_JUDGE_HOLDING (0.5)
#define FRICTION (87)
#define PLAYER_CENTER_TO_BALL_CENTER (60)
#define MAX_PLAYER_NUM (12)
#define THREAD_NUM (128)
#define BLOCK_X (16)
#define BLOCK_Y (MAX_PLAYER_NUM * 2)
#define MAX_BALL_SPEED (650)
#define MIN_BALL_SPEED (50)
// FIX: was integer division ((650-50)/16 == 37, truncated from 37.5), which
// compressed the sampled speed range; force floating-point arithmetic.
#define BALL_SPEED_UNIT ((MAX_BALL_SPEED - MIN_BALL_SPEED) / (double)BLOCK_X)
#define MIN_DELTA_TIME (0.2)

typedef struct { double x, y; } Vector;
typedef struct { double x, y; } Point;

// One robot: position, velocity, and whether the vision data is valid.
typedef struct { Point Pos; Vector Vel; bool isValid; } Player;

// One interception candidate produced on the GPU.
typedef struct {
    Point interPos;    // predicted interception point (9999,9999 = impossible)
    double interTime;  // player's travel time to interPos (9999 = impossible)
    double Vel;
    float dir;
    int playerIndex;   // 0..11 our robots, 12..23 opponents
    double deltaTime;  // time margin over the fastest opponent (filled on host)
    double Q;
} rType;

// True iff p lies inside the field (units: cm, field 1200 x 900).
__device__ bool IsInField(Point p)
{
    return (p.x > -600 && p.x < 600 && p.y < 450 && p.y > -450);
}

// True iff p lies inside either penalty area.
__device__ bool IsInPenalty(Point p)
{
    return (p.x < -480 && p.x > -600 && p.y > -120 && p.y < 120) ||
           (p.x >  480 && p.x <  600 && p.y > -120 && p.y < 120);
}

// Predicts where/when one of OUR robots can intercept a flat (rolling) ball.
// Simulates the decelerating ball in stepTime increments and stops at the first
// point the robot reaches before the ball. Returns false (and 9999 sentinels)
// when the ball leaves the field or stops before it can be intercepted.
__device__ bool predictedInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                   Point* interceptPoint, double* interTime, double responseTime)
{
    const double ballSpeed = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    if (ballSpeed < 40) {
        // Ball is (nearly) stationary: intercept where it lies.
        *interceptPoint = ballPoint;
        *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
        return true;
    }
    // FIX: was FRICTION / 2 (integer 43); the chip-kick code already uses 43.5.
    const double ballAcc = FRICTION / 2.0;      // rolling deceleration
    double ballArriveTime = 0;
    double meArriveTime = 9999;
    const double stepTime = 0.1;                // simulation time step
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double testVel = ballSpeed;
    double max_time = ballSpeed / ballAcc;      // time until the ball stops
    bool canInter = true;
    for (ballArriveTime = 0; ballArriveTime < max_time; ballArriveTime += stepTime) {
        testVel = ballSpeed - ballAcc * ballArriveTime;  // v = v0 - a*t
        // Trapezoid rule for distance rolled, plus the robot-to-ball offset.
        testBallLength = PLAYER_CENTER_TO_BALL_CENTER + (ballSpeed + testVel) * ballArriveTime / 2;
        Vector direc;
        direc.x = testBallLength * ballVel.x / ballSpeed;
        direc.y = testBallLength * ballVel.y / ballSpeed;
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.15)
            meArriveTime = 0;                   // within reaction tolerance
        if (IsInPenalty(testPoint))
            continue;                           // field robots may not enter
        if (!IsInField(testPoint)) {
            canInter = false;                   // ball leaves the field first
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;                              // robot beats the ball here
    }
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Same idea for a chip kick: the ball is airborne (two bounces modelled via
// factor_1/factor_2) and only becomes interceptable after time_1 + time_2,
// then rolls with deceleration ballAcc.
__device__ bool predictedChipInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                       Point* interceptPoint, double* interTime, double responseTime)
{
    double chipVel = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    double meArriveTime = 9999;
    double ballAcc = FRICTION / 2.0;            // rolling deceleration after landing
    double stepTime = 0.1;
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double factor_1 = 0.2;                      // speed retained by the first bounce
    double factor_2 = 0.1;                      // horizontal speed factor after bounces
    double time_1 = SQRT_2 * chipVel / 100.0 / G;             // first flight time
    double time_2 = SQRT_2 * chipVel * factor_1 / 100.0 / G;  // second flight time
    double length_1 = (chipVel * chipVel) / 100 / G;                       // first flight range
    double length_2 = (factor_1 * chipVel * factor_1 * chipVel) / 100 / G; // second flight range
    double moveVel = chipVel / SQRT_2 * factor_2;             // rolling speed after landing
    double max_time = SQRT_2 * chipVel / 100 / G + SQRT_2 * 0.2 * chipVel / 100 / G + chipVel / (SQRT_2 * ballAcc);
    bool canInter = true;
    double ballArriveTime = time_1 + time_2;    // ball untouchable while airborne
    while (ballArriveTime < max_time) {
        Vector direc;
        testBallLength = length_1 + length_2 + (moveVel * ballArriveTime - 0.5 * ballAcc * ballArriveTime * ballArriveTime);
        direc.x = testBallLength * ballVel.x / sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
        direc.y = testBallLength * ballVel.y / sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.10)
            meArriveTime = 0;
        if (IsInPenalty(testPoint)) {
            ballArriveTime += stepTime;
            continue;
        }
        if (!IsInField(testPoint)) {
            canInter = false;
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;
        ballArriveTime += stepTime;
    }
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Opponent version of predictedInterTime: uses the opponents' motion model.
__device__ bool predictedTheirInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                        Point* interceptPoint, double* interTime, double responseTime)
{
    const double ballSpeed = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    if (ballSpeed < 40) {
        *interceptPoint = ballPoint;
        *interTime = predictedTheirTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
        return true;
    }
    const double ballAcc = FRICTION / 2.0;      // FIX: was integer FRICTION / 2
    double ballArriveTime = 0;
    double meArriveTime = 9999;
    const double stepTime = 0.1;
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double testVel = ballSpeed;
    double max_time = ballSpeed / ballAcc;
    bool canInter = true;
    for (ballArriveTime = 0; ballArriveTime < max_time; ballArriveTime += stepTime) {
        testVel = ballSpeed - ballAcc * ballArriveTime;  // v = v0 - a*t
        testBallLength = PLAYER_CENTER_TO_BALL_CENTER + (ballSpeed + testVel) * ballArriveTime / 2;
        Vector direc;
        direc.x = testBallLength * ballVel.x / ballSpeed;
        direc.y = testBallLength * ballVel.y / ballSpeed;
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTheirTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.15)
            meArriveTime = 0;
        if (IsInPenalty(testPoint))
            continue;
        if (!IsInField(testPoint)) {
            canInter = false;
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;
    }
    // FIX: also treat loop exhaustion as failure (the sibling function did;
    // this one silently returned the last simulated point as a success).
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    // FIX: was predictedTime() — our motion model — for an opponent robot.
    *interTime = predictedTheirTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Grid: (BLOCK_X speeds, BLOCK_Y players); block: THREAD_NUM angles.
// Each thread evaluates one (speed, angle, player) combination for a flat pass
// (first half of bestPass) and a chip pass (second half). No shared memory.
__global__ void calculateAllInterInfo(Player* players, Point* ballPos, rType* bestPass)
{
    int angleIndex = threadIdx.x;
    int speedIndex = blockIdx.x;
    int playerNum = blockIdx.y;
    Vector ballVel;
    // FIX: cospi(x)/sinpi(x) already multiply by pi, so the argument must be
    // 2*angleIndex/THREAD_NUM. The old code passed 2*PI*angleIndex/THREAD_NUM,
    // sampling cos(2*pi^2*k/N) — a wrong, non-uniform set of directions.
    double speed = speedIndex * BALL_SPEED_UNIT + MIN_BALL_SPEED;
    ballVel.x = speed * cospi(2.0 * angleIndex / THREAD_NUM);
    ballVel.y = speed * sinpi(2.0 * angleIndex / THREAD_NUM);
    double interTime = 9999;
    Point interPoint;
    interPoint.x = 9999;
    interPoint.y = 9999;
    if (players[playerNum].isValid) {
        if (playerNum < MAX_PLAYER_NUM)  // 0..11: our robots
            predictedInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
        else                             // 12..23: opponents
            predictedTheirInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
    }
    int offset = blockIdx.y + gridDim.y * (threadIdx.x + blockIdx.x * blockDim.x);
    bestPass[offset].interPos = interPoint;
    bestPass[offset].interTime = interTime;
    bestPass[offset].playerIndex = playerNum;

    /***************** chip pass *******************/
    // The original had identical then/else branches here; one call suffices
    // (predictedChipInterTime always writes interPoint/interTime).
    if (players[playerNum].isValid)
        predictedChipInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
    offset += BLOCK_X * BLOCK_Y * THREAD_NUM;  // chip results: second half
    bestPass[offset].interPos = interPoint;
    bestPass[offset].interTime = interTime;
    bestPass[offset].playerIndex = playerNum;
    // No trailing __syncthreads(): the kernel uses no shared memory.
}

// Grid: (BLOCK_X, THREAD_NUM) blocks of BLOCK_Y threads. Each block sorts the
// BLOCK_Y per-player records of one (speed, angle) cell ascending by interTime,
// first for the flat-pass half, then for the chip half.
// Odd-even transposition sort over shared memory.
__global__ void getBest(rType* passPoints)
{
    __shared__ rType iP[BLOCK_Y];
    int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    int playerNum = threadIdx.x;
    iP[playerNum] = passPoints[blockId * blockDim.x + playerNum];
    __syncthreads();
    // FIX: the original let EVERY thread compare-exchange (p, p+1) in one phase
    // and (p-1, p) in the next, so neighbouring threads raced on the same slot
    // within a phase. Proper odd-even transposition uses disjoint pairs:
    // even phases swap (0,1)(2,3)..., odd phases swap (1,2)(3,4)....
    for (int i = 0; i < blockDim.x; i++) {
        if ((playerNum % 2) == (i % 2) && playerNum + 1 < blockDim.x &&
            iP[playerNum].interTime > iP[playerNum + 1].interTime) {
            rType temp = iP[playerNum + 1];
            iP[playerNum + 1] = iP[playerNum];
            iP[playerNum] = temp;
        }
        __syncthreads();
    }
    passPoints[blockId * blockDim.x + playerNum] = iP[playerNum];

    /************ chip half, same sort ************/
    __shared__ rType iP2[BLOCK_Y];
    iP2[playerNum] = passPoints[blockId * blockDim.x + playerNum + BLOCK_X * BLOCK_Y * THREAD_NUM];
    __syncthreads();
    for (int i = 0; i < blockDim.x; i++) {
        if ((playerNum % 2) == (i % 2) && playerNum + 1 < blockDim.x &&
            iP2[playerNum].interTime > iP2[playerNum + 1].interTime) {
            rType temp = iP2[playerNum + 1];
            iP2[playerNum + 1] = iP2[playerNum];
            iP2[playerNum] = temp;
        }
        __syncthreads();
    }
    passPoints[blockId * blockDim.x + playerNum + BLOCK_X * BLOCK_Y * THREAD_NUM] = iP2[playerNum];
}

// Compacts one half of the sorted GPU results into `result`:
// keeps our robots' records up to the first opponent record, fills the rest
// with `sentinel`, and drops candidates whose margin over the fastest
// opponent is below MIN_DELTA_TIME.
static void fillResults(const rType* bestPass, rType* result, int begin, int end, const rType& sentinel)
{
    for (int i = begin; i < end; i += BLOCK_Y) {
        int playerNum = 0;
        for (int j = 0; j < MAX_PLAYER_NUM; j++) {
            if (bestPass[i + j].playerIndex >= MAX_PLAYER_NUM) {
                // First opponent found: pad remaining slots, then apply margin.
                while (playerNum < MAX_PLAYER_NUM) {
                    result[i / 2 + playerNum] = sentinel;
                    playerNum++;
                }
                for (int k = 0; k < j; k++) {
                    result[i / 2 + k].deltaTime = bestPass[i + j].interTime - result[i / 2 + k].interTime;
                    if (result[i / 2 + k].deltaTime < MIN_DELTA_TIME)
                        result[i / 2 + k] = sentinel;
                }
                break;
            }
            else {
                result[i / 2 + playerNum] = bestPass[i + j];
                playerNum++;
            }
        }
    }
}

// Evaluates every (speed, direction) pass on the GPU and writes, per cell,
// the viable interceptors for our team into `result` (flat passes in the
// first half, chip passes in the second). Synchronous.
extern "C" void BestPass(const Player* players, const Point ball, rType* result)
{
    Player* dev_players;
    Point* dev_ball;
    rType *dev_bestPass, *bestPass;
    hipMalloc((void**)&dev_players, 2 * MAX_PLAYER_NUM * sizeof(Player));
    hipMalloc((void**)&dev_ball, sizeof(Point));
    hipMalloc((void**)&dev_bestPass, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM * sizeof(rType));
    hipMemcpy(dev_players, players, 2 * MAX_PLAYER_NUM * sizeof(Player), hipMemcpyHostToDevice);
    hipMemcpy(dev_ball, &ball, sizeof(Point), hipMemcpyHostToDevice);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);

    dim3 blocks1(BLOCK_X, BLOCK_Y);  // FIX: was misspelled "bolcks"
    hipLaunchKernelGGL((calculateAllInterInfo), dim3(blocks1), dim3(THREAD_NUM), 0, 0,
                       dev_players, dev_ball, dev_bestPass);
    dim3 blocks2(BLOCK_X, THREAD_NUM);
    hipLaunchKernelGGL((getBest), dim3(blocks2), dim3(BLOCK_Y), 0, 0, dev_bestPass);

    bestPass = new rType[2 * BLOCK_X * BLOCK_Y * THREAD_NUM];
    hipMemcpy(bestPass, dev_bestPass, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM * sizeof(rType), hipMemcpyDeviceToHost);
    hipError_t cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        printf("CUDA ERROR: %d\n", (int)cudaStatus);
        printf("Error Name: %s\n", hipGetErrorName(cudaStatus));
        printf("Description: %s\n", hipGetErrorString(cudaStatus));
    }
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("Time: %.5f ms\n", milliseconds);
    // FIX: events were leaked on every call.
    hipEventDestroy(start);
    hipEventDestroy(stop);

    rType defaultPlayer;
    defaultPlayer.dir = 9999;
    defaultPlayer.interPos.x = 9999;
    defaultPlayer.interPos.y = 9999;
    defaultPlayer.interTime = 9999;
    defaultPlayer.Vel = 9999;
    defaultPlayer.deltaTime = -9999;

    // Flat passes, then chip passes (the two original loops were identical).
    fillResults(bestPass, result, 0, BLOCK_X * BLOCK_Y * THREAD_NUM, defaultPlayer);
    fillResults(bestPass, result, BLOCK_X * BLOCK_Y * THREAD_NUM, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM, defaultPlayer);

    delete[] bestPass;
    hipFree(dev_players);
    hipFree(dev_ball);
    hipFree(dev_bestPass);
}
f3abdd57408f3854a615e1dc4363dd858f026f64.cu
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math_functions.h>
#include "CMmotion.cu"
#include <stdio.h>

#define FRAME_PERIOD (1 / 60.0)
#define ZERO_NUM (1e-8)
#define A_FACTOR (1.5)
#define OUR_MAX_ACC (450)
#define OUR_MAX_DEC (450)
#define OUR_MAX_VEL (300)
#define THEIR_MAX_ACC (500)
#define THEIR_MAX_DEC (500)
#define THEIR_MAC_VEL (350)
#define PI (3.14159265359)
#define G (9.8)
#define SQRT_2 (1.414)
#define TIME_FOR_OUR (0)
#define TIME_FOR_OUR_BOTH_KEEP (-0.2)
#define TIME_FOR_THEIR_BOTH_KEEP (-0.4)
#define TIME_FOR_THEIR (-0.6)
#define TIME_FOR_JUDGE_HOLDING (0.5)
#define FRICTION (87)
#define PLAYER_CENTER_TO_BALL_CENTER (60)
#define MAX_PLAYER_NUM (12)
#define THREAD_NUM (128)
#define BLOCK_X (16)
#define BLOCK_Y (MAX_PLAYER_NUM * 2)
#define MAX_BALL_SPEED (650)
#define MIN_BALL_SPEED (50)
// FIX: was integer division ((650-50)/16 == 37, truncated from 37.5).
#define BALL_SPEED_UNIT ((MAX_BALL_SPEED - MIN_BALL_SPEED) / (double)BLOCK_X)
#define MIN_DELTA_TIME (0.2)

typedef struct { double x, y; } Vector;
typedef struct { double x, y; } Point;

// One robot: position, velocity, and whether the vision data is valid.
typedef struct { Point Pos; Vector Vel; bool isValid; } Player;

// One interception candidate produced on the GPU.
typedef struct {
    Point interPos;    // interception point (截球点); 9999,9999 = impossible
    double interTime;  // interception time (截球时间); 9999 = impossible
    double Vel;
    float dir;
    int playerIndex;   // 0..11 our robots, 12..23 opponents
    double deltaTime;  // margin over the fastest opponent (filled on host)
    double Q;
} rType;

// True iff p lies inside the field (units: cm).
__device__ bool IsInField(Point p)
{
    return (p.x > -600 && p.x < 600 && p.y < 450 && p.y > -450);
}

// True iff p lies inside either penalty area.
__device__ bool IsInPenalty(Point p)
{
    return (p.x < -480 && p.x > -600 && p.y > -120 && p.y < 120) ||
           (p.x >  480 && p.x <  600 && p.y > -120 && p.y < 120);
}

// Predicts where/when one of OUR robots can intercept a flat (rolling) ball.
// Steps the decelerating ball forward and stops at the first point the robot
// reaches before the ball; false + 9999 sentinels when interception fails.
__device__ bool predictedInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                   Point* interceptPoint, double* interTime, double responseTime)
{
    const double ballSpeed = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    if (ballSpeed < 40) {
        *interceptPoint = ballPoint;  // ball nearly still: intercept in place
        *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
        return true;
    }
    // FIX: was FRICTION / 2 (integer 43); the chip code already uses 43.5.
    const double ballAcc = FRICTION / 2.0;      // rolling deceleration
    double ballArriveTime = 0;
    double meArriveTime = 9999;
    const double stepTime = 0.1;                // simulation time step
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double testVel = ballSpeed;
    double max_time = ballSpeed / ballAcc;      // time until the ball stops
    bool canInter = true;
    for (ballArriveTime = 0; ballArriveTime < max_time; ballArriveTime += stepTime) {
        testVel = ballSpeed - ballAcc * ballArriveTime;  // v = v0 - a*t
        // Trapezoid rule for rolled distance, plus robot-to-ball offset.
        testBallLength = PLAYER_CENTER_TO_BALL_CENTER + (ballSpeed + testVel) * ballArriveTime / 2;
        Vector direc;
        direc.x = testBallLength * ballVel.x / ballSpeed;
        direc.y = testBallLength * ballVel.y / ballSpeed;
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.15)
            meArriveTime = 0;
        if (IsInPenalty(testPoint))
            continue;
        if (!IsInField(testPoint)) {
            canInter = false;
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;
    }
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Chip-kick version: the ball is airborne for time_1 + time_2 (two bounces,
// modelled via factor_1/factor_2), then rolls with deceleration ballAcc.
__device__ bool predictedChipInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                       Point* interceptPoint, double* interTime, double responseTime)
{
    double chipVel = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    double meArriveTime = 9999;
    double ballAcc = FRICTION / 2.0;            // rolling deceleration after landing
    double stepTime = 0.1;
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double factor_1 = 0.2;                      // speed retained by the first bounce
    double factor_2 = 0.1;                      // horizontal speed factor after bounces
    double time_1 = SQRT_2 * chipVel / 100.0 / G;             // first flight time
    double time_2 = SQRT_2 * chipVel * factor_1 / 100.0 / G;  // second flight time
    double length_1 = (chipVel * chipVel) / 100 / G;                       // first flight range
    double length_2 = (factor_1 * chipVel * factor_1 * chipVel) / 100 / G; // second flight range
    double moveVel = chipVel / SQRT_2 * factor_2;             // rolling speed after landing
    double max_time = SQRT_2 * chipVel / 100 / G + SQRT_2 * 0.2 * chipVel / 100 / G + chipVel / (SQRT_2 * ballAcc);
    bool canInter = true;
    double ballArriveTime = time_1 + time_2;    // untouchable while airborne
    while (ballArriveTime < max_time) {
        Vector direc;
        testBallLength = length_1 + length_2 + (moveVel * ballArriveTime - 0.5 * ballAcc * ballArriveTime * ballArriveTime);
        direc.x = testBallLength * ballVel.x / sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
        direc.y = testBallLength * ballVel.y / sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.10)
            meArriveTime = 0;
        if (IsInPenalty(testPoint)) {
            ballArriveTime += stepTime;
            continue;
        }
        if (!IsInField(testPoint)) {
            canInter = false;
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;
        ballArriveTime += stepTime;
    }
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    *interTime = predictedTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Opponent version of predictedInterTime: uses the opponents' motion model.
__device__ bool predictedTheirInterTime(Point mePoint, Point ballPoint, Vector meVel, Vector ballVel,
                                        Point* interceptPoint, double* interTime, double responseTime)
{
    const double ballSpeed = sqrt(ballVel.x * ballVel.x + ballVel.y * ballVel.y);
    if (ballSpeed < 40) {
        *interceptPoint = ballPoint;
        *interTime = predictedTheirTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
        return true;
    }
    const double ballAcc = FRICTION / 2.0;      // FIX: was integer FRICTION / 2
    double ballArriveTime = 0;
    double meArriveTime = 9999;
    const double stepTime = 0.1;
    double testBallLength = 0;
    Point testPoint = ballPoint;
    double testVel = ballSpeed;
    double max_time = ballSpeed / ballAcc;
    bool canInter = true;
    for (ballArriveTime = 0; ballArriveTime < max_time; ballArriveTime += stepTime) {
        testVel = ballSpeed - ballAcc * ballArriveTime;  // v = v0 - a*t
        testBallLength = PLAYER_CENTER_TO_BALL_CENTER + (ballSpeed + testVel) * ballArriveTime / 2;
        Vector direc;
        direc.x = testBallLength * ballVel.x / ballSpeed;
        direc.y = testBallLength * ballVel.y / ballSpeed;
        testPoint.x = ballPoint.x + direc.x;
        testPoint.y = ballPoint.y + direc.y;
        meArriveTime = predictedTheirTime(mePoint.x, mePoint.y, testPoint.x, testPoint.y, meVel.x, meVel.y);
        if (meArriveTime < 0.15)
            meArriveTime = 0;
        if (IsInPenalty(testPoint))
            continue;
        if (!IsInField(testPoint)) {
            canInter = false;
            break;
        }
        if (meArriveTime + responseTime < ballArriveTime)
            break;
    }
    // FIX: also treat loop exhaustion as failure, as predictedInterTime does.
    if (!canInter || ballArriveTime >= max_time) {
        interceptPoint->x = 9999;
        interceptPoint->y = 9999;
        *interTime = 9999;
        return false;
    }
    *interceptPoint = testPoint;
    // FIX: was predictedTime() — our motion model — for an opponent robot.
    *interTime = predictedTheirTime(mePoint.x, mePoint.y, interceptPoint->x, interceptPoint->y, meVel.x, meVel.y);
    return true;
}

// Grid: (BLOCK_X speeds, BLOCK_Y players); block: THREAD_NUM angles.
// Each thread evaluates one (speed, angle, player) combination for a flat pass
// (first half of bestPass) and a chip pass (second half). No shared memory.
__global__ void calculateAllInterInfo(Player* players, Point* ballPos, rType* bestPass)
{
    int angleIndex = threadIdx.x;
    int speedIndex = blockIdx.x;
    int playerNum = blockIdx.y;
    Vector ballVel;
    // FIX: cospi(x)/sinpi(x) compute cos(pi*x)/sin(pi*x); the old argument
    // 2*PI*angleIndex/THREAD_NUM therefore sampled cos(2*pi^2*k/N) instead of
    // the intended uniform directions theta = 2*pi*k/N.
    double speed = speedIndex * BALL_SPEED_UNIT + MIN_BALL_SPEED;
    ballVel.x = speed * cospi(2.0 * angleIndex / THREAD_NUM);
    ballVel.y = speed * sinpi(2.0 * angleIndex / THREAD_NUM);
    double interTime = 9999;
    Point interPoint;
    interPoint.x = 9999;
    interPoint.y = 9999;
    if (players[playerNum].isValid) {
        if (playerNum < MAX_PLAYER_NUM)  // 0..11: our robots
            predictedInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
        else                             // 12..23: opponents
            predictedTheirInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
    }
    int offset = blockIdx.y + gridDim.y * (threadIdx.x + blockIdx.x * blockDim.x);
    bestPass[offset].interPos = interPoint;
    bestPass[offset].interTime = interTime;
    bestPass[offset].playerIndex = playerNum;

    /***************** chip pass *******************/
    // The original had identical then/else branches here; one call suffices.
    if (players[playerNum].isValid)
        predictedChipInterTime(players[playerNum].Pos, *ballPos, players[playerNum].Vel, ballVel, &interPoint, &interTime, 0);
    offset += BLOCK_X * BLOCK_Y * THREAD_NUM;  // chip results: second half
    bestPass[offset].interPos = interPoint;
    bestPass[offset].interTime = interTime;
    bestPass[offset].playerIndex = playerNum;
    // No trailing __syncthreads(): the kernel uses no shared memory.
}

// Grid: (BLOCK_X, THREAD_NUM) blocks of BLOCK_Y threads. Each block sorts the
// BLOCK_Y per-player records of one (speed, angle) cell ascending by interTime,
// first for the flat-pass half, then for the chip half.
// Odd-even transposition sort over shared memory.
__global__ void getBest(rType* passPoints)
{
    __shared__ rType iP[BLOCK_Y];
    int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    int playerNum = threadIdx.x;
    iP[playerNum] = passPoints[blockId * blockDim.x + playerNum];
    __syncthreads();
    // FIX: the original let EVERY thread compare-exchange (p, p+1) in one phase
    // and (p-1, p) in the next, so neighbouring threads raced on the same slot
    // within a phase. Proper odd-even transposition uses disjoint pairs:
    // even phases swap (0,1)(2,3)..., odd phases swap (1,2)(3,4)....
    for (int i = 0; i < blockDim.x; i++) {
        if ((playerNum % 2) == (i % 2) && playerNum + 1 < blockDim.x &&
            iP[playerNum].interTime > iP[playerNum + 1].interTime) {
            rType temp = iP[playerNum + 1];
            iP[playerNum + 1] = iP[playerNum];
            iP[playerNum] = temp;
        }
        __syncthreads();
    }
    passPoints[blockId * blockDim.x + playerNum] = iP[playerNum];

    /************ chip half, same sort ************/
    __shared__ rType iP2[BLOCK_Y];
    iP2[playerNum] = passPoints[blockId * blockDim.x + playerNum + BLOCK_X * BLOCK_Y * THREAD_NUM];
    __syncthreads();
    for (int i = 0; i < blockDim.x; i++) {
        if ((playerNum % 2) == (i % 2) && playerNum + 1 < blockDim.x &&
            iP2[playerNum].interTime > iP2[playerNum + 1].interTime) {
            rType temp = iP2[playerNum + 1];
            iP2[playerNum + 1] = iP2[playerNum];
            iP2[playerNum] = temp;
        }
        __syncthreads();
    }
    passPoints[blockId * blockDim.x + playerNum + BLOCK_X * BLOCK_Y * THREAD_NUM] = iP2[playerNum];
}

// Compacts one half of the sorted GPU results into `result`: keeps our
// robots' records up to the first opponent record, pads with `sentinel`, and
// drops candidates whose margin over the fastest opponent is < MIN_DELTA_TIME.
static void fillResults(const rType* bestPass, rType* result, int begin, int end, const rType& sentinel)
{
    for (int i = begin; i < end; i += BLOCK_Y) {
        int playerNum = 0;
        for (int j = 0; j < MAX_PLAYER_NUM; j++) {
            if (bestPass[i + j].playerIndex >= MAX_PLAYER_NUM) {
                while (playerNum < MAX_PLAYER_NUM) {
                    result[i / 2 + playerNum] = sentinel;
                    playerNum++;
                }
                for (int k = 0; k < j; k++) {
                    result[i / 2 + k].deltaTime = bestPass[i + j].interTime - result[i / 2 + k].interTime;
                    if (result[i / 2 + k].deltaTime < MIN_DELTA_TIME)
                        result[i / 2 + k] = sentinel;
                }
                break;
            }
            else {
                result[i / 2 + playerNum] = bestPass[i + j];
                playerNum++;
            }
        }
    }
}

// Evaluates every (speed, direction) pass on the GPU and writes, per cell,
// the viable interceptors for our team into `result` (flat passes in the
// first half, chip passes in the second). Synchronous.
extern "C" void BestPass(const Player* players, const Point ball, rType* result)
{
    Player* dev_players;
    Point* dev_ball;
    rType *dev_bestPass, *bestPass;
    cudaMalloc((void**)&dev_players, 2 * MAX_PLAYER_NUM * sizeof(Player));
    cudaMalloc((void**)&dev_ball, sizeof(Point));
    cudaMalloc((void**)&dev_bestPass, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM * sizeof(rType));
    cudaMemcpy(dev_players, players, 2 * MAX_PLAYER_NUM * sizeof(Player), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_ball, &ball, sizeof(Point), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    dim3 blocks1(BLOCK_X, BLOCK_Y);  // FIX: was misspelled "bolcks"
    calculateAllInterInfo<<<blocks1, THREAD_NUM>>>(dev_players, dev_ball, dev_bestPass);
    dim3 blocks2(BLOCK_X, THREAD_NUM);
    getBest<<<blocks2, BLOCK_Y>>>(dev_bestPass);

    bestPass = new rType[2 * BLOCK_X * BLOCK_Y * THREAD_NUM];
    cudaMemcpy(bestPass, dev_bestPass, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM * sizeof(rType), cudaMemcpyDeviceToHost);
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        printf("CUDA ERROR: %d\n", (int)cudaStatus);
        printf("Error Name: %s\n", cudaGetErrorName(cudaStatus));
        printf("Description: %s\n", cudaGetErrorString(cudaStatus));
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Time: %.5f ms\n", milliseconds);
    // FIX: events were leaked on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    rType defaultPlayer;
    defaultPlayer.dir = 9999;
    defaultPlayer.interPos.x = 9999;
    defaultPlayer.interPos.y = 9999;
    defaultPlayer.interTime = 9999;
    defaultPlayer.Vel = 9999;
    defaultPlayer.deltaTime = -9999;

    // Flat passes, then chip passes (the two original loops were identical).
    fillResults(bestPass, result, 0, BLOCK_X * BLOCK_Y * THREAD_NUM, defaultPlayer);
    fillResults(bestPass, result, BLOCK_X * BLOCK_Y * THREAD_NUM, 2 * BLOCK_X * BLOCK_Y * THREAD_NUM, defaultPlayer);

    delete[] bestPass;
    cudaFree(dev_players);
    cudaFree(dev_ball);
    cudaFree(dev_bestPass);
}
882128efefb28db36226a05b3370d3263ad1c559.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/loss/categorical_accuracy_impl.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** Fill matrix with corresponding indices. * Indices are equivalent to the global row indices of the input * matrix. 
*/ template <typename TensorDataType> __global__ void fill_indices_kernel(El::Int const local_height, El::Int const local_width, El::Int col_shift, El::Int col_stride, El::Int* __restrict__ indices) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; const El::Int size = local_height * local_width; for (El::Int pos = gid; pos < size; pos += nthreads) { const auto& row = pos % local_height; const auto& col = pos / local_height; indices[row + col * local_height] = col_shift + row * col_stride; } } /** Find largest entry within each CUDA block. * Each block is assigned several entries from the same mini-batch * sample and it finds the largest entry. Results are output to * nblocksx x width matrices. */ template <El::Int block_size, typename TensorDataType> __global__ void reduce_max_entries_kernel(El::Int height, El::Int width, const TensorDataType* __restrict__ values, El::Int values_row_stride, El::Int values_col_stride, const El::Int* __restrict__ indices, El::Int indices_row_stride, El::Int indices_col_stride, TensorDataType* __restrict__ max_values, El::Int* __restrict__ max_indices) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidx = blockIdx.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksx = gridDim.x; // Reduce each matrix column independently for (El::Int col = bidy; col < width; col += gridDim.y) { // Find largest entry for each thread TensorDataType private_max_val = -gpu_lib::infinity<TensorDataType>(); El::Int private_max_ind = gpu_lib::max<El::Int>(); for (El::Int row = gidx; row < height; row += nthreadsx) { const auto& val = values[row * values_row_stride + col * values_col_stride]; const auto& ind = indices[row * indices_row_stride + col * indices_col_stride]; if (val > private_max_val || (val == private_max_val && ind < private_max_ind)) { private_max_val = 
val; private_max_ind = ind; } } // Shared memory reduction to get largest entry for each block __shared__ TensorDataType shared_max_vals[block_size]; __shared__ El::Int shared_max_inds[block_size]; shared_max_vals[tid] = private_max_val; shared_max_inds[tid] = private_max_ind; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { const auto& val = shared_max_vals[tid + stride]; const auto& ind = shared_max_inds[tid + stride]; if (val > shared_max_vals[tid] || (val == shared_max_vals[tid] && ind < shared_max_inds[tid])) { shared_max_vals[tid] = val; shared_max_inds[tid] = ind; } } } if (tid == 0) { max_values[bidx + col * nblocksx] = shared_max_vals[0]; max_indices[bidx + col * nblocksx] = shared_max_inds[0]; } } } /** Compute sample-wise categorical accuracy. * Outputs one if the prediction and label indices match and * otherwise outputs zero. */ template <typename TensorDataType> __global__ void compute_accuracy_kernel(El::Int local_width, const El::Int* __restrict__ prediction_indices, const El::Int* __restrict__ label_indices, TensorDataType* __restrict__ loss, El::Int loss_ldim) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; constexpr El::Int max_ind = gpu_lib::max<El::Int>(); for (El::Int col = gid; col < local_width; col += nthreads) { const auto& prediction = prediction_indices[col]; const auto& label = label_indices[col]; loss[col * loss_ldim] = (prediction == label && prediction < max_ind ? TensorDataType(1.0) : TensorDataType(0.0)); } } /** GPU implementation of categorical accuracy layer forward prop. 
*/ template <typename TensorDataType> void fp_gpu(lbann_comm& comm, const El::AbstractDistMatrix<TensorDataType>& predictions, const El::AbstractDistMatrix<TensorDataType>& labels, El::AbstractDistMatrix<TensorDataType>& loss) { // Local matrices const auto& local_predictions = predictions.LockedMatrix(); const auto& local_labels = labels.LockedMatrix(); auto& local_loss = loss.Matrix(); // Dimensions const auto& local_height = local_predictions.Height(); const auto& local_width = local_predictions.Width(); if (local_width < 1) { return; } // Column communicator auto&& col_comm = predictions.ColComm(); const auto& col_comm_rank = El::mpi::Rank(col_comm); const auto& col_comm_size = El::mpi::Size(col_comm); const auto& col_comm_root = loss.RowOwner(0); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_loss), gpu::get_sync_info(local_predictions), gpu::get_sync_info(local_labels)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Initialize CUDA threads/blocks for reduction kernel // Note: reduce_max_entries_kernel uses a 2D thread distribution // with a 256 x 1 block and nblocksx x local_width grid. 
constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.y = local_width; // Get indices for all input entries gpu_lib::thrust::vector<El::Int> full_inds(local_height * local_width); if (full_inds.size() > 0) { const El::Int grid_size = (full_inds.size() + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel(fill_indices_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_height, local_width, predictions.ColShift(), predictions.ColStride(), full_inds.data().get()); } // Find largest prediction entries in local data grid_dims.x = (local_height + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } gpu_lib::thrust::vector<TensorDataType> prediction_vals(grid_dims.x * local_width); gpu_lib::thrust::vector<El::Int> prediction_inds(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_predictions.LockedBuffer(), 1, local_predictions.LDim(), full_inds.data().get(), 1, local_height, prediction_vals.data().get(), prediction_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals( std::move(prediction_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(prediction_inds)); prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, prediction_vals.data().get(), prediction_inds.data().get()); } // Gather large prediction entries /// @todo Non-blocking gather Al::request prediction_vals_req, prediction_inds_req; gpu_lib::thrust::vector<TensorDataType> 
gathered_prediction_vals; gpu_lib::thrust::vector<El::Int> gathered_prediction_inds; if (col_comm_size > 1) { if (col_comm_rank != col_comm_root) { comm.gather(prediction_vals.data().get(), prediction_vals.size(), col_comm_root, col_comm, sync_info); comm.gather(prediction_inds.data().get(), prediction_inds.size(), col_comm_root, col_comm, sync_info); } else { gathered_prediction_vals.resize(prediction_vals.size() * col_comm_size); gathered_prediction_inds.resize(prediction_inds.size() * col_comm_size); comm.gather(prediction_vals.data().get(), prediction_vals.size(), gathered_prediction_vals.data().get(), col_comm, sync_info); comm.gather(prediction_inds.data().get(), prediction_inds.size(), gathered_prediction_inds.data().get(), col_comm, sync_info); } } // Find largest label entries in local data grid_dims.x = (local_height + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } gpu_lib::thrust::vector<TensorDataType> label_vals(grid_dims.x * local_width); gpu_lib::thrust::vector<El::Int> label_inds(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_labels.LockedBuffer(), 1, local_labels.LDim(), full_inds.data().get(), 1, local_height, label_vals.data().get(), label_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(label_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(label_inds)); label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, label_vals.data().get(), label_inds.data().get()); } // 
Gather large label entries /// @todo Non-blocking gather Al::request label_vals_req, label_inds_req; gpu_lib::thrust::vector<TensorDataType> gathered_label_vals; gpu_lib::thrust::vector<El::Int> gathered_label_inds; if (col_comm_size > 1) { if (col_comm_rank != col_comm_root) { comm.gather(label_vals.data().get(), label_vals.size(), col_comm_root, col_comm, sync_info); comm.gather(label_inds.data().get(), label_inds.size(), col_comm_root, col_comm, sync_info); } else { gathered_label_vals.resize(label_vals.size() * col_comm_size); gathered_label_inds.resize(label_inds.size() * col_comm_size); comm.gather(label_vals.data().get(), label_vals.size(), gathered_label_vals.data().get(), col_comm, sync_info); comm.gather(label_inds.data().get(), label_inds.size(), gathered_label_inds.data().get(), col_comm, sync_info); } } // Clean up temporary arrays full_inds.clear(); // Find largest prediction entry in global data comm.wait(prediction_vals_req); comm.wait(prediction_inds_req); if (col_comm_size > 1 && col_comm_rank == col_comm_root) { grid_dims.x = (col_comm_size + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, col_comm_size, local_width, gathered_prediction_vals.data().get(), col_comm_size, 1, gathered_prediction_inds.data().get(), col_comm_size, 1, prediction_vals.data().get(), prediction_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals( std::move(prediction_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(prediction_inds)); prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( 
reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, prediction_vals.data().get(), prediction_inds.data().get()); } } // Find largest label entry in global data comm.wait(label_vals_req); comm.wait(label_inds_req); if (col_comm_size > 1 && col_comm_rank == col_comm_root) { grid_dims.x = (col_comm_size + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, col_comm_size, local_width, gathered_label_vals.data().get(), col_comm_size, 1, gathered_label_inds.data().get(), col_comm_size, 1, label_vals.data().get(), label_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(label_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(label_inds)); label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, label_vals.data().get(), label_inds.data().get()); } } // Compute categorical accuracy if (col_comm_rank == col_comm_root) { const El::Int grid_size = (local_width + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel(compute_accuracy_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_width, prediction_inds.data().get(), label_inds.data().get(), local_loss.Buffer(), local_loss.LDim()); } } } // namespace template <typename TensorDataType, data_layout T_layout, 
El::Device Dev> void categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() { fp_gpu(*this->get_comm(), this->get_prev_activations(0), this->get_prev_activations(1), this->get_activations()); } #define PROTO(T) \ template class categorical_accuracy_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class categorical_accuracy_layer<T, \ data_layout::MODEL_PARALLEL, \ El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
882128efefb28db36226a05b3370d3263ad1c559.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <lbann-dev@llnl.gov> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE #include "lbann/comm_impl.hpp" #include "lbann/layers/loss/categorical_accuracy_impl.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { /** Fill matrix with corresponding indices. * Indices are equivalent to the global row indices of the input * matrix. 
*/ template <typename TensorDataType> __global__ void fill_indices_kernel(El::Int const local_height, El::Int const local_width, El::Int col_shift, El::Int col_stride, El::Int* __restrict__ indices) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; const El::Int size = local_height * local_width; for (El::Int pos = gid; pos < size; pos += nthreads) { const auto& row = pos % local_height; const auto& col = pos / local_height; indices[row + col * local_height] = col_shift + row * col_stride; } } /** Find largest entry within each CUDA block. * Each block is assigned several entries from the same mini-batch * sample and it finds the largest entry. Results are output to * nblocksx x width matrices. */ template <El::Int block_size, typename TensorDataType> __global__ void reduce_max_entries_kernel(El::Int height, El::Int width, const TensorDataType* __restrict__ values, El::Int values_row_stride, El::Int values_col_stride, const El::Int* __restrict__ indices, El::Int indices_row_stride, El::Int indices_col_stride, TensorDataType* __restrict__ max_values, El::Int* __restrict__ max_indices) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidx = blockIdx.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; const El::Int nblocksx = gridDim.x; // Reduce each matrix column independently for (El::Int col = bidy; col < width; col += gridDim.y) { // Find largest entry for each thread TensorDataType private_max_val = -gpu_lib::infinity<TensorDataType>(); El::Int private_max_ind = gpu_lib::max<El::Int>(); for (El::Int row = gidx; row < height; row += nthreadsx) { const auto& val = values[row * values_row_stride + col * values_col_stride]; const auto& ind = indices[row * indices_row_stride + col * indices_col_stride]; if (val > private_max_val || (val == private_max_val && ind < private_max_ind)) { private_max_val = 
val; private_max_ind = ind; } } // Shared memory reduction to get largest entry for each block __shared__ TensorDataType shared_max_vals[block_size]; __shared__ El::Int shared_max_inds[block_size]; shared_max_vals[tid] = private_max_val; shared_max_inds[tid] = private_max_ind; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { const auto& val = shared_max_vals[tid + stride]; const auto& ind = shared_max_inds[tid + stride]; if (val > shared_max_vals[tid] || (val == shared_max_vals[tid] && ind < shared_max_inds[tid])) { shared_max_vals[tid] = val; shared_max_inds[tid] = ind; } } } if (tid == 0) { max_values[bidx + col * nblocksx] = shared_max_vals[0]; max_indices[bidx + col * nblocksx] = shared_max_inds[0]; } } } /** Compute sample-wise categorical accuracy. * Outputs one if the prediction and label indices match and * otherwise outputs zero. */ template <typename TensorDataType> __global__ void compute_accuracy_kernel(El::Int local_width, const El::Int* __restrict__ prediction_indices, const El::Int* __restrict__ label_indices, TensorDataType* __restrict__ loss, El::Int loss_ldim) { const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; const El::Int nthreads = blockDim.x * gridDim.x; constexpr El::Int max_ind = gpu_lib::max<El::Int>(); for (El::Int col = gid; col < local_width; col += nthreads) { const auto& prediction = prediction_indices[col]; const auto& label = label_indices[col]; loss[col * loss_ldim] = (prediction == label && prediction < max_ind ? TensorDataType(1.0) : TensorDataType(0.0)); } } /** GPU implementation of categorical accuracy layer forward prop. 
*/ template <typename TensorDataType> void fp_gpu(lbann_comm& comm, const El::AbstractDistMatrix<TensorDataType>& predictions, const El::AbstractDistMatrix<TensorDataType>& labels, El::AbstractDistMatrix<TensorDataType>& loss) { // Local matrices const auto& local_predictions = predictions.LockedMatrix(); const auto& local_labels = labels.LockedMatrix(); auto& local_loss = loss.Matrix(); // Dimensions const auto& local_height = local_predictions.Height(); const auto& local_width = local_predictions.Width(); if (local_width < 1) { return; } // Column communicator auto&& col_comm = predictions.ColComm(); const auto& col_comm_rank = El::mpi::Rank(col_comm); const auto& col_comm_size = El::mpi::Size(col_comm); const auto& col_comm_root = loss.RowOwner(0); // GPU objects auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_loss), gpu::get_sync_info(local_predictions), gpu::get_sync_info(local_labels)); // The comm templates will not convert the multisync, so cast the multisync // and use sync_info for comms. El::SyncInfo<El::Device::GPU> const& sync_info = multisync; // Initialize CUDA threads/blocks for reduction kernel // Note: reduce_max_entries_kernel uses a 2D thread distribution // with a 256 x 1 block and nblocksx x local_width grid. 
constexpr El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.y = local_width; // Get indices for all input entries gpu_lib::thrust::vector<El::Int> full_inds(local_height * local_width); if (full_inds.size() > 0) { const El::Int grid_size = (full_inds.size() + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel(fill_indices_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_height, local_width, predictions.ColShift(), predictions.ColStride(), full_inds.data().get()); } // Find largest prediction entries in local data grid_dims.x = (local_height + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } gpu_lib::thrust::vector<TensorDataType> prediction_vals(grid_dims.x * local_width); gpu_lib::thrust::vector<El::Int> prediction_inds(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_predictions.LockedBuffer(), 1, local_predictions.LDim(), full_inds.data().get(), 1, local_height, prediction_vals.data().get(), prediction_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals( std::move(prediction_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(prediction_inds)); prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, prediction_vals.data().get(), prediction_inds.data().get()); } // Gather large prediction entries /// @todo Non-blocking gather Al::request prediction_vals_req, prediction_inds_req; gpu_lib::thrust::vector<TensorDataType> 
gathered_prediction_vals; gpu_lib::thrust::vector<El::Int> gathered_prediction_inds; if (col_comm_size > 1) { if (col_comm_rank != col_comm_root) { comm.gather(prediction_vals.data().get(), prediction_vals.size(), col_comm_root, col_comm, sync_info); comm.gather(prediction_inds.data().get(), prediction_inds.size(), col_comm_root, col_comm, sync_info); } else { gathered_prediction_vals.resize(prediction_vals.size() * col_comm_size); gathered_prediction_inds.resize(prediction_inds.size() * col_comm_size); comm.gather(prediction_vals.data().get(), prediction_vals.size(), gathered_prediction_vals.data().get(), col_comm, sync_info); comm.gather(prediction_inds.data().get(), prediction_inds.size(), gathered_prediction_inds.data().get(), col_comm, sync_info); } } // Find largest label entries in local data grid_dims.x = (local_height + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } gpu_lib::thrust::vector<TensorDataType> label_vals(grid_dims.x * local_width); gpu_lib::thrust::vector<El::Int> label_inds(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_labels.LockedBuffer(), 1, local_labels.LDim(), full_inds.data().get(), 1, local_height, label_vals.data().get(), label_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(label_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(label_inds)); label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, label_vals.data().get(), label_inds.data().get()); } // 
Gather large label entries /// @todo Non-blocking gather Al::request label_vals_req, label_inds_req; gpu_lib::thrust::vector<TensorDataType> gathered_label_vals; gpu_lib::thrust::vector<El::Int> gathered_label_inds; if (col_comm_size > 1) { if (col_comm_rank != col_comm_root) { comm.gather(label_vals.data().get(), label_vals.size(), col_comm_root, col_comm, sync_info); comm.gather(label_inds.data().get(), label_inds.size(), col_comm_root, col_comm, sync_info); } else { gathered_label_vals.resize(label_vals.size() * col_comm_size); gathered_label_inds.resize(label_inds.size() * col_comm_size); comm.gather(label_vals.data().get(), label_vals.size(), gathered_label_vals.data().get(), col_comm, sync_info); comm.gather(label_inds.data().get(), label_inds.size(), gathered_label_inds.data().get(), col_comm, sync_info); } } // Clean up temporary arrays full_inds.clear(); // Find largest prediction entry in global data comm.wait(prediction_vals_req); comm.wait(prediction_inds_req); if (col_comm_size > 1 && col_comm_rank == col_comm_root) { grid_dims.x = (col_comm_size + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, col_comm_size, local_width, gathered_prediction_vals.data().get(), col_comm_size, 1, gathered_prediction_inds.data().get(), col_comm_size, 1, prediction_vals.data().get(), prediction_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals( std::move(prediction_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(prediction_inds)); prediction_vals.resize(grid_dims.x * local_width); prediction_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( 
reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, prediction_vals.data().get(), prediction_inds.data().get()); } } // Find largest label entry in global data comm.wait(label_vals_req); comm.wait(label_inds_req); if (col_comm_size > 1 && col_comm_rank == col_comm_root) { grid_dims.x = (col_comm_size + block_size - 1) / block_size; if (grid_dims.x < 1) { grid_dims.x = 1; } label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, col_comm_size, local_width, gathered_label_vals.data().get(), col_comm_size, 1, gathered_label_inds.data().get(), col_comm_size, 1, label_vals.data().get(), label_inds.data().get()); while (grid_dims.x > 1) { const El::Int prev_height = grid_dims.x; grid_dims.x = (prev_height + block_size - 1) / block_size; gpu_lib::thrust::vector<TensorDataType> prev_vals(std::move(label_vals)); gpu_lib::thrust::vector<El::Int> prev_inds(std::move(label_inds)); label_vals.resize(grid_dims.x * local_width); label_inds.resize(grid_dims.x * local_width); hydrogen::gpu::LaunchKernel( reduce_max_entries_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, prev_height, local_width, prev_vals.data().get(), 1, prev_height, prev_inds.data().get(), 1, prev_height, label_vals.data().get(), label_inds.data().get()); } } // Compute categorical accuracy if (col_comm_rank == col_comm_root) { const El::Int grid_size = (local_width + block_size - 1) / block_size; hydrogen::gpu::LaunchKernel(compute_accuracy_kernel<TensorDataType>, grid_size, block_size, 0, multisync, local_width, prediction_inds.data().get(), label_inds.data().get(), local_loss.Buffer(), local_loss.LDim()); } } } // namespace template <typename TensorDataType, data_layout T_layout, 
El::Device Dev> void categorical_accuracy_layer<TensorDataType, T_layout, Dev>::fp_compute() { fp_gpu(*this->get_comm(), this->get_prev_activations(0), this->get_prev_activations(1), this->get_activations()); } #define PROTO(T) \ template class categorical_accuracy_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class categorical_accuracy_layer<T, \ data_layout::MODEL_PARALLEL, \ El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
607cb7b943a7619d9775b711e62da6cafe78ee78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "layers/psroi_pooling.h" #define ROUND(x) ((int)((x) + (float)0.5)) using std::max; using std::min; PsroiPooling::PsroiPooling(void **output, vector<int> input_feature_shape, vector<int> input_roi_shape, vector<int> output_shape, float spatial_sacle) { input_featrue_shape_ = input_feature_shape; input_roi_shape_ = input_roi_shape; output_shape_ = output_shape; spatial_scale_ = spatial_sacle; top_data_ = reinterpret_cast<float *>(output[0]); } __global__ void PSROIPoolingForward( const int nthreads, const float *bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float *bottom_rois, const int output_dim, const int group_size, float *top_data, int *mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(ROUND(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(ROUND(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(ROUND(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(ROUND(bottom_rois[4]) + 1.) 
* spatial_scale; //printf("%f %f %f %f\n",roi_start_w,roi_start_h,roi_end_w,roi_end_h); // Force too small ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom float bin_size_h = roi_height / static_cast<float>(pooled_height); float bin_size_w = roi_width / static_cast<float>(pooled_width); int hstart = floor(static_cast<float>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw) * bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop * group_size + gh) * group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; float out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; out_sum += bottom_data[bottom_index]; } } float bin_area = (hend - hstart) * (wend - wstart); top_data[index] = is_empty ? 0. 
: out_sum / bin_area; mapping_channel[index] = c; } } void PsroiPooling::forward_gpu(const void *const *inputs) { const float *bottom_data = reinterpret_cast<const float *>(inputs[0]); const float *bottom_rois = reinterpret_cast<const float *>(inputs[1]); int *mapping_channel_ptr = nullptr; //std::cout<<input_roi_shape_[0]<<std::endl; hipMalloc((void **)&mapping_channel_ptr, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(int)); hipMemset(mapping_channel_ptr, -1, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(int)); hipMemset(top_data_, 0, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(float)); int count = input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2]; hipLaunchKernelGGL(( PSROIPoolingForward), dim3(TENSORRT_GET_BLOCKS(count)), dim3(TENSORRT_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, input_featrue_shape_[0], input_featrue_shape_[1], input_featrue_shape_[2], output_shape_[1], output_shape_[2], bottom_rois, output_shape_[0], output_shape_[1], top_data_, mapping_channel_ptr); hipFree(mapping_channel_ptr); }
607cb7b943a7619d9775b711e62da6cafe78ee78.cu
#include "layers/psroi_pooling.h" #define ROUND(x) ((int)((x) + (float)0.5)) using std::max; using std::min; PsroiPooling::PsroiPooling(void **output, vector<int> input_feature_shape, vector<int> input_roi_shape, vector<int> output_shape, float spatial_sacle) { input_featrue_shape_ = input_feature_shape; input_roi_shape_ = input_roi_shape; output_shape_ = output_shape; spatial_scale_ = spatial_sacle; top_data_ = reinterpret_cast<float *>(output[0]); } __global__ void PSROIPoolingForward( const int nthreads, const float *bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float *bottom_rois, const int output_dim, const int group_size, float *top_data, int *mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = static_cast<float>(ROUND(bottom_rois[1])) * spatial_scale; float roi_start_h = static_cast<float>(ROUND(bottom_rois[2])) * spatial_scale; float roi_end_w = static_cast<float>(ROUND(bottom_rois[3]) + 1.) * spatial_scale; float roi_end_h = static_cast<float>(ROUND(bottom_rois[4]) + 1.) 
* spatial_scale; //printf("%f %f %f %f\n",roi_start_w,roi_start_h,roi_end_w,roi_end_h); // Force too small ROIs to be 1x1 float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 float roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom float bin_size_h = roi_height / static_cast<float>(pooled_height); float bin_size_w = roi_width / static_cast<float>(pooled_width); int hstart = floor(static_cast<float>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<float>(pw) * bin_size_w + roi_start_w); int hend = ceil(static_cast<float>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<float>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop * group_size + gh) * group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; float out_sum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; out_sum += bottom_data[bottom_index]; } } float bin_area = (hend - hstart) * (wend - wstart); top_data[index] = is_empty ? 0. 
: out_sum / bin_area; mapping_channel[index] = c; } } void PsroiPooling::forward_gpu(const void *const *inputs) { const float *bottom_data = reinterpret_cast<const float *>(inputs[0]); const float *bottom_rois = reinterpret_cast<const float *>(inputs[1]); int *mapping_channel_ptr = nullptr; //std::cout<<input_roi_shape_[0]<<std::endl; cudaMalloc((void **)&mapping_channel_ptr, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(int)); cudaMemset(mapping_channel_ptr, -1, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(int)); cudaMemset(top_data_, 0, input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2] * sizeof(float)); int count = input_roi_shape_[0] * output_shape_[0] * output_shape_[1] * output_shape_[2]; PSROIPoolingForward<<<TENSORRT_GET_BLOCKS(count), TENSORRT_CUDA_NUM_THREADS>>>(count, bottom_data, spatial_scale_, input_featrue_shape_[0], input_featrue_shape_[1], input_featrue_shape_[2], output_shape_[1], output_shape_[2], bottom_rois, output_shape_[0], output_shape_[1], top_data_, mapping_channel_ptr); cudaFree(mapping_channel_ptr); }
2d8cc8e37533402a86f89d4e74d2e00acdb2d9e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> #include <omp.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 256 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. #define THREADS_PER_BLOCK 256 #define BLOCKS_PER_GRID_ROW 128 double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } __global__ void getStats(float *pArray, float *pMaxResults, float *pMinResults, float *pAvgResults) { // Declare arrays to be in shared memory. // 256 elements * (4 bytes / element) * 3 = 3KB. __shared__ float min[256]; __shared__ float max[256]; __shared__ float avg[256]; // Calculate which element this thread reads from memory int arrayIndex = 128*256*blockIdx.y + 256*blockIdx.x + threadIdx.x; min[threadIdx.x] = max[threadIdx.x] = avg[threadIdx.x] = pArray[arrayIndex]; __syncthreads(); int nTotalThreads = blockDim.x; // Total number of active threads while(nTotalThreads > 1) { int halfPoint = (nTotalThreads >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread float temp = min[threadIdx.x + halfPoint]; if (temp < min[threadIdx.x]) min[threadIdx.x] = temp; temp = max[threadIdx.x + halfPoint]; if (temp > max[threadIdx.x]) max[threadIdx.x] = temp; // when calculating the average, sum and divide avg[threadIdx.x] += avg[threadIdx.x + halfPoint]; avg[threadIdx.x] /= 2; } __syncthreads(); nTotalThreads = (nTotalThreads >> 1); // divide by two. } // At this point in time, thread zero has the min, max, and average // It's time for thread zero to write it's final results. // Note that the address structure of pResults is different, because // there is only one value for every thread block. if (threadIdx.x == 0) { pMaxResults[128*blockIdx.y + blockIdx.x] = max[0]; pMinResults[128*blockIdx.y + blockIdx.x] = min[0]; pAvgResults[128*blockIdx.y + blockIdx.x] = avg[0]; } } void getStatsCPU(float *pArray, int nElems, float *pMin, float *pMax, float *pAvg) { // This function uses the CPU to find the min, max and average of an array if (nElems <= 0) return; float min, max, avg; min = max = avg = pArray[0]; for (int i=1; i < nElems; i++) { float temp = pArray[i]; if (temp < min) min = temp; if (temp > max) max = temp; avg += temp; // we will divide once after for loop for speed. 
} avg /= (float)nElems; *pMin = min; *pMax = max; *pAvg = avg; } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_data, *h_resultMax, *h_resultMin, *h_resultAvg; float *d_data, *d_resultMax, *d_resultMin, *d_resultAvg; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_data = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultMax = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); h_resultMin = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); h_resultAvg = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); hipMalloc( (void **)&d_data, sizeof(float) * MAX_DATA_SIZE); hipMalloc( (void **)&d_resultMax, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); hipMalloc( (void **)&d_resultMin, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); hipMalloc( (void **)&d_resultAvg, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_data[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > BLOCKS_PER_GRID_ROW*THREADS_PER_BLOCK; dataAmount /= 2) { float tempMin,tempMax,tempAvg; int blockGridWidth = BLOCKS_PER_GRID_ROW; int blockGridHeight = (dataAmount / THREADS_PER_BLOCK) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(THREADS_PER_BLOCK, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 1) { // Copy the data to the device hipMemcpy(d_data, h_data, sizeof(float) * dataAmount, hipMemcpyHostToDevice); // Do the multiplication on the GPU hipLaunchKernelGGL(( getStats), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_data, d_resultMax, d_resultMin, d_resultAvg); hipDeviceSynchronize(); // Copy the data back to the host hipMemcpy(h_resultMin, d_resultMin, sizeof(float) * dataAmount / THREADS_PER_BLOCK, hipMemcpyDeviceToHost); hipMemcpy(h_resultMax, d_resultMax, sizeof(float) * dataAmount / THREADS_PER_BLOCK, hipMemcpyDeviceToHost); hipMemcpy(h_resultAvg, d_resultAvg, sizeof(float) * dataAmount / THREADS_PER_BLOCK, hipMemcpyDeviceToHost); // Each block returned one result, so lets finish this off with the cpu. // By using CUDA, we basically reduced how much the CPU would have to work by about 256 times. tempMin = h_resultMin[0]; tempMax = h_resultMax[0]; tempAvg = h_resultAvg[0]; for (int i=1 ; i < dataAmount / THREADS_PER_BLOCK; i++) { if (h_resultMin[i] < tempMin) tempMin = h_resultMin[i]; if (h_resultMax[i] > tempMax) tempMax = h_resultMax[i]; tempAvg += h_resultAvg[i]; } tempAvg /= (dataAmount / THREADS_PER_BLOCK); } else { // We're using the CPU only getStatsCPU(h_data, dataAmount, &tempMin, &tempMax, &tempAvg); } printf("Min: %f Max %f Avg %f\n", tempMin, tempMax, tempAvg); // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. 
dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); hipFree(d_resultMin ); hipFree(d_resultMax ); hipFree(d_resultAvg ); hipFree(d_data); free(h_resultMin); free(h_resultMax); free(h_resultAvg); free(h_data); }
2d8cc8e37533402a86f89d4e74d2e00acdb2d9e5.cu
#include <stdio.h> #include <sys/time.h> #include <omp.h> /* This file can be downloaded from supercomputingblog.com. This is part of a series of tutorials that demonstrate how to use CUDA The tutorials will also demonstrate the speed of using CUDA */ // IMPORTANT NOTE: for this data size, your graphics card should have at least 256 megabytes of memory. // If your GPU has less memory, then you will need to decrease this data size. #define MAX_DATA_SIZE 1024*1024*32 // about 32 million elements. // The max data size must be an integer multiple of 128*256, because each block will have 256 threads, // and the block grid width will be 128. These are arbitrary numbers I choose. #define THREADS_PER_BLOCK 256 #define BLOCKS_PER_GRID_ROW 128 double myDiffTime(struct timeval &start, struct timeval &end) { double d_start, d_end; d_start = (double)(start.tv_sec + start.tv_usec/1000000.0); d_end = (double)(end.tv_sec + end.tv_usec/1000000.0); return (d_end - d_start); } __global__ void getStats(float *pArray, float *pMaxResults, float *pMinResults, float *pAvgResults) { // Declare arrays to be in shared memory. // 256 elements * (4 bytes / element) * 3 = 3KB. __shared__ float min[256]; __shared__ float max[256]; __shared__ float avg[256]; // Calculate which element this thread reads from memory int arrayIndex = 128*256*blockIdx.y + 256*blockIdx.x + threadIdx.x; min[threadIdx.x] = max[threadIdx.x] = avg[threadIdx.x] = pArray[arrayIndex]; __syncthreads(); int nTotalThreads = blockDim.x; // Total number of active threads while(nTotalThreads > 1) { int halfPoint = (nTotalThreads >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread float temp = min[threadIdx.x + halfPoint]; if (temp < min[threadIdx.x]) min[threadIdx.x] = temp; temp = max[threadIdx.x + halfPoint]; if (temp > max[threadIdx.x]) max[threadIdx.x] = temp; // when calculating the average, sum and divide avg[threadIdx.x] += avg[threadIdx.x + halfPoint]; avg[threadIdx.x] /= 2; } __syncthreads(); nTotalThreads = (nTotalThreads >> 1); // divide by two. } // At this point in time, thread zero has the min, max, and average // It's time for thread zero to write it's final results. // Note that the address structure of pResults is different, because // there is only one value for every thread block. if (threadIdx.x == 0) { pMaxResults[128*blockIdx.y + blockIdx.x] = max[0]; pMinResults[128*blockIdx.y + blockIdx.x] = min[0]; pAvgResults[128*blockIdx.y + blockIdx.x] = avg[0]; } } void getStatsCPU(float *pArray, int nElems, float *pMin, float *pMax, float *pAvg) { // This function uses the CPU to find the min, max and average of an array if (nElems <= 0) return; float min, max, avg; min = max = avg = pArray[0]; for (int i=1; i < nElems; i++) { float temp = pArray[i]; if (temp < min) min = temp; if (temp > max) max = temp; avg += temp; // we will divide once after for loop for speed. 
} avg /= (float)nElems; *pMin = min; *pMax = max; *pAvg = avg; } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ float *h_data, *h_resultMax, *h_resultMin, *h_resultAvg; float *d_data, *d_resultMax, *d_resultMin, *d_resultAvg; double gpuTime; int i; timeval start, end; printf("Initializing data...\n"); h_data = (float *)malloc(sizeof(float) * MAX_DATA_SIZE); h_resultMax = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); h_resultMin = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); h_resultAvg = (float *)malloc(sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); cudaMalloc( (void **)&d_data, sizeof(float) * MAX_DATA_SIZE); cudaMalloc( (void **)&d_resultMax, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); cudaMalloc( (void **)&d_resultMin, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); cudaMalloc( (void **)&d_resultAvg, sizeof(float) * MAX_DATA_SIZE / THREADS_PER_BLOCK); srand(123); for(i = 0; i < MAX_DATA_SIZE; i++) { h_data[i] = (float)rand() / (float)RAND_MAX; } int firstRun = 1; // Indicates if it's the first execution of the for loop const int useGPU = 1; // When 0, only the CPU is used. When 1, only the GPU is used for (int dataAmount = MAX_DATA_SIZE; dataAmount > BLOCKS_PER_GRID_ROW*THREADS_PER_BLOCK; dataAmount /= 2) { float tempMin,tempMax,tempAvg; int blockGridWidth = BLOCKS_PER_GRID_ROW; int blockGridHeight = (dataAmount / THREADS_PER_BLOCK) / blockGridWidth; dim3 blockGridRows(blockGridWidth, blockGridHeight); dim3 threadBlockRows(THREADS_PER_BLOCK, 1); // Start the timer. 
// We want to measure copying data, running the kernel, and copying the results back to host gettimeofday(&start, NULL); if (useGPU == 1) { // Copy the data to the device cudaMemcpy(d_data, h_data, sizeof(float) * dataAmount, cudaMemcpyHostToDevice); // Do the multiplication on the GPU getStats<<<blockGridRows, threadBlockRows>>>(d_data, d_resultMax, d_resultMin, d_resultAvg); cudaThreadSynchronize(); // Copy the data back to the host cudaMemcpy(h_resultMin, d_resultMin, sizeof(float) * dataAmount / THREADS_PER_BLOCK, cudaMemcpyDeviceToHost); cudaMemcpy(h_resultMax, d_resultMax, sizeof(float) * dataAmount / THREADS_PER_BLOCK, cudaMemcpyDeviceToHost); cudaMemcpy(h_resultAvg, d_resultAvg, sizeof(float) * dataAmount / THREADS_PER_BLOCK, cudaMemcpyDeviceToHost); // Each block returned one result, so lets finish this off with the cpu. // By using CUDA, we basically reduced how much the CPU would have to work by about 256 times. tempMin = h_resultMin[0]; tempMax = h_resultMax[0]; tempAvg = h_resultAvg[0]; for (int i=1 ; i < dataAmount / THREADS_PER_BLOCK; i++) { if (h_resultMin[i] < tempMin) tempMin = h_resultMin[i]; if (h_resultMax[i] > tempMax) tempMax = h_resultMax[i]; tempAvg += h_resultAvg[i]; } tempAvg /= (dataAmount / THREADS_PER_BLOCK); } else { // We're using the CPU only getStatsCPU(h_data, dataAmount, &tempMin, &tempMax, &tempAvg); } printf("Min: %f Max %f Avg %f\n", tempMin, tempMax, tempAvg); // Stop the timer, print the total round trip execution time. gettimeofday(&end, NULL); gpuTime = myDiffTime(start, end); if (!firstRun || !useGPU) { printf("Elements: %d - convolution time : %f msec - %f Multiplications/sec\n", dataAmount, gpuTime, blockGridHeight * 128 * 256 / (gpuTime * 0.001)); } else { firstRun = 0; // We discard the results of the first run because of the extra overhead incurred // during the first time a kernel is ever executed. 
dataAmount *= 2; // reset to first run value } } printf("Cleaning up...\n"); cudaFree(d_resultMin ); cudaFree(d_resultMax ); cudaFree(d_resultAvg ); cudaFree(d_data); free(h_resultMin); free(h_resultMax); free(h_resultAvg); free(h_data); }
e6e30420a4cda9b519bcb9d4b1c3add94978aaae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cudaDinv_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int size = 1; const double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cudaDinv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cudaDinv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cudaDinv_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e6e30420a4cda9b519bcb9d4b1c3add94978aaae.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cudaDinv_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int size = 1; const double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cudaDinv_kernel<<<gridBlock,threadBlock>>>(size,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cudaDinv_kernel<<<gridBlock,threadBlock>>>(size,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cudaDinv_kernel<<<gridBlock,threadBlock>>>(size,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
80fdce4dc610f706b479f6b31320ceb127b7ff27.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for computing Pagerank. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> #include <cstdlib> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> #include <gunrock/graphio/rmat.cuh> #include <gunrock/graphio/rgg.cuh> // BFS includes #include <gunrock/app/pr/pr_enactor.cuh> #include <gunrock/app/pr/pr_problem.cuh> #include <gunrock/app/pr/pr_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/page_rank.hpp> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::pr; /****************************************************************************** * Defines, constants, globals ******************************************************************************/ template <typename VertexId, typename Value> struct RankPair { VertexId vertex_id; Value page_rank; RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {} }; template<typename RankPair> bool PRCompare( RankPair elem1, RankPair elem2) { return elem1.page_rank > elem2.page_rank; } 
/****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "\ntest_pr <graph type> <graph type args> [--device=<device_index>] " "[--undirected] [--instrumented] [--quick=<0|1>] [--v]\n" "\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n" " edges from stdin (or from the optionally-specified file).\n" " --device=<device_index> Set GPU device for running the graph primitive.\n" " --undirected If set then treat the graph as undirected.\n" " --instrumented If set then kernels keep track of queue-search_depth\n" " and barrier duty (a relative indicator of load imbalance.)\n" " --quick If set will skip the CPU validation code. Default: 0\n" ); } /** * @brief Displays the PageRank result * * @param[in] node_id Node vertex Id * @param[in] rank Rank value for the node * @param[in] nodes Number of nodes in the graph. */ template<typename VertexId, typename Value, typename SizeT> void DisplaySolution(VertexId *node, Value *rank, SizeT nodes) { int top = (nodes < 10) ? nodes : 10; // at most top 10 ranked nodes printf("\nTop %d Ranked Vertices and PageRanks:\n", top); for (int i = 0; i < top; ++i) { printf("Vertex ID: %d, PageRank: %5f\n", node[i], rank[i]); } } /** * @brief Compares the equivalence of two arrays. If incorrect, print the location * of the first incorrect value appears, the incorrect value, and the reference * value. * * @tparam T datatype of the values being compared with. * @tparam SizeT datatype of the array length. * * @param[in] computed Vector of values to be compared. * @param[in] reference Vector of reference values * @param[in] len Vector length * @param[in] verbose Whether to print values around the incorrect one. * * \return Zero if two vectors are exactly the same, non-zero if there is any difference. 
*/ template <typename SizeT> int CompareResults_( float* computed, float* reference, SizeT len, bool verbose = true, bool quiet = false) { float THRESHOLD = 0.05f; int flag = 0; for (SizeT i = 0; i < len; i++) { // Use relative error rate here. bool is_right = true; if (fabs(computed[i]) < 0.01f && fabs(reference[i] - 1) < 0.01f) continue; if (fabs(computed[i] - 0.0) < 0.01f) { if (fabs(computed[i] - reference[i]) > THRESHOLD) is_right = false; } else { if (fabs((computed[i] - reference[i]) / reference[i]) > THRESHOLD) is_right = false; } if (!is_right && flag == 0) { if (!quiet) { printf("\nINCORRECT: [%lu]: ", (unsigned long) i); PrintValue<float>(computed[i]); PrintValue<float>(reference[i]); if (verbose) { printf("\nresult[..."); for (size_t j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<float>(computed[j]); printf(", "); } printf("...]"); printf("\nreference[..."); for (size_t j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<float>(reference[j]); printf(", "); } printf("...]"); } } flag += 1; } if (!is_right && flag > 0) flag += 1; } if (!quiet) { printf("\n"); if (!flag) { printf("CORRECT"); } } return flag; } /****************************************************************************** * PageRank Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference Page Rank implementation. 
* * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta delta for computing PR * @param[in] error error threshold * @param[in] max_iteration max iteration to go */ template < typename VertexId, typename Value, typename SizeT > void SimpleReferencePageRank( const Csr<VertexId, Value, SizeT> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false) { using namespace boost; // preparation typedef adjacency_list< vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph; Graph g; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j) { Graph::edge_descriptor e = add_edge(i, graph.column_indices[j], g).first; put(edge_index, g, e, i); } } // compute PageRank CpuTimer cpu_timer; cpu_timer.Start(); std::vector<Value> ranks(num_vertices(g)); page_rank(g, make_iterator_property_map( ranks.begin(), get(boost::vertex_index, g)), boost::graph::n_iterations(max_iteration)); cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); for (std::size_t i = 0; i < num_vertices(g); ++i) { rank[i] = ranks[i]; } // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * num_vertices(g)); for (int i = 0; i < num_vertices(g); ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank[i]; } std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >); for (int i = 0; i < num_vertices(g); ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = pr_list[i].page_rank; } free(pr_list); if (!quiet) { printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT 
* @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void RunTests(Info<VertexId, Value, SizeT> *info) { typedef PRProblem <VertexId, SizeT, Value > PrProblem; typedef PREnactor <PrProblem, INSTRUMENT, DEBUG, SIZE_CHECK > PrEnactor; // parse configurations from mObject info Csr<VertexId, Value, SizeT> *graph = info->csr_ptr; VertexId src = info->info["source_vertex"].get_int64(); bool undirected = info->info["undirected"].get_bool(); bool quiet_mode = info->info["quiet_mode"].get_bool(); bool quick_mode = info->info["quick_mode"].get_bool(); bool stream_from_host = info->info["stream_from_host"].get_bool(); int max_grid_size = info->info["max_grid_size"].get_int(); int num_gpus = info->info["num_gpus"].get_int(); int max_iteration = info->info["max_iteration"].get_int(); double max_queue_sizing = info->info["max_queue_sizing"].get_real(); double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real(); double max_in_sizing = info->info["max_in_sizing"].get_real(); std::string partition_method = info->info["partition_method"].get_str(); double partition_factor = info->info["partition_factor"].get_real(); int partition_seed = info->info["partition_seed"].get_int(); int iterations = info->info["num_iteration"].get_int(); int traversal_mode = info->info["traversal_mode"].get_int(); std::string ref_filename = info->info["ref_filename"].get_str(); Value delta = info->info["delta"].get_real(); Value error = info->info["error"].get_real(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; hipStream_t *streams = (hipStream_t*)info->streams; // Allocate host-side array (for both 
reference and GPU-computed results) Value *ref_rank = new Value [graph->nodes]; Value *h_rank = new Value [graph->nodes]; VertexId *h_node_id = new VertexId[graph->nodes]; VertexId *ref_node_id = new VertexId[graph->nodes]; Value *ref_check = (quick_mode) ? NULL : ref_rank; size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&(org_size[gpu]), &dummy); } PrEnactor* enactor = new PrEnactor(num_gpus, gpu_idx); // enactor map PrProblem *problem = new PrProblem; // allocate problem on GPU util::GRError(problem->Init( stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "PR Problem Init failed", __FILE__, __LINE__); util::GRError(enactor->Init( context, problem, traversal_mode, max_grid_size), "PR Enactor Init failed", __FILE__, __LINE__); double elapsed = 0.0f; // perform PageRank CpuTimer cpu_timer; for (int iter = 0; iter < iterations; ++iter) { util::GRError(problem->Reset( src, delta, error, max_iteration, enactor->GetFrontierType(), max_queue_sizing), "PR Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "PR Enactor Reset Reset failed", __FILE__, __LINE__); if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); util::GRError(enactor->Enact(traversal_mode), "PR Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); if (!quiet_mode) { printf("--------------------------\n"); fflush(stdout); } elapsed += cpu_timer.ElapsedMillis(); } elapsed /= iterations; // copy out results util::GRError(problem->Extract(h_rank, h_node_id), "PR Problem Data Extraction Failed", __FILE__, __LINE__); if (!quiet_mode) { float total_pr = 0; for (int i = 0; i < graph->nodes; ++i) { total_pr += h_rank[i]; } printf("Total rank : %f\n", total_pr); } // compute reference CPU solution if (ref_check != NULL) { if (!quiet_mode) { 
printf("Computing reference value ...\n"); } SimpleReferencePageRank <VertexId, Value, SizeT>( *graph, ref_node_id, ref_check, delta, error, max_iteration, !undirected, quiet_mode); if (!quiet_mode) { printf("\n"); } } // Verify the result if (ref_check != NULL) { if (!quiet_mode) { printf("Validity Rank: "); } int errors_count = CompareResults_( h_rank, ref_check, graph->nodes, true, quiet_mode); if (errors_count > 0) { if (!quiet_mode) { printf("number of errors : %lld\n", (long long) errors_count); } } } if (!quiet_mode) { printf("\nFirst 40 labels of the GPU result."); // Display Solution DisplaySolution(h_node_id, h_rank, graph->nodes); } info->ComputeCommonStats( // compute running statistics enactor->enactor_stats.GetPointer(), elapsed); if (!quiet_mode) { info->DisplayStats(); // display collected statistics } info->CollectInfo(); // collected all the info and put into JSON mObject if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) {if (gpu != 0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus > 1) printf(" #keys%d", num_gpus); printf("\n"); double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor; } if (num_gpus > 1 && i != 0 ) for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_ = factor; } } if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); } // Clean up if (org_size ) { delete org_size ; org_size = NULL; } if (problem ) { delete problem ; problem = NULL; } if (enactor ) { delete enactor ; enactor = NULL; } if (ref_rank ) { delete[] ref_rank ; ref_rank = NULL; } if (ref_node_id) { delete[] ref_node_id; ref_node_id = NULL; } if (h_rank ) { delete[] h_rank ; h_rank = NULL; } if (h_node_id ) { delete[] h_node_id ; h_node_id = NULL; } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] info Pointer to mObject info. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(Info<VertexId, Value, SizeT> *info) { if (info->info["size_check"].get_bool()) { RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true>(info); } else { RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(info); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] info Pointer to mObject info. 
*/ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(Info<VertexId, Value, SizeT> *info) { if (info->info["debug_mode"].get_bool()) { RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info); } else { RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to mObject info. */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(Info<VertexId, Value, SizeT> *info) { if (info->info["instrument"].get_bool()) { RunTests_debug<VertexId, Value, SizeT, true>(info); } else { RunTests_debug<VertexId, Value, SizeT, false>(info); } } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help")) { Usage(); return 1; } typedef int VertexId; // Use int as the vertex identifier typedef float Value; // Use float as the value type typedef int SizeT; // Use int as the graph size type Csr<VertexId, Value, SizeT> csr(false); // graph we process on Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>; // graph construction or generation related parameters info->info["undirected"] = true; // require undirected input graph info->info["edge_value"] = false; // don't need per edge weight values info->Init("PageRank", args, csr); // initialize Info structure RunTests_instrumented<VertexId, Value, SizeT>(info); // run test return 0; }
80fdce4dc610f706b479f6b31320ceb127b7ff27.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for computing Pagerank. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> #include <cstdlib> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utils #include <gunrock/graphio/market.cuh> #include <gunrock/graphio/rmat.cuh> #include <gunrock/graphio/rgg.cuh> // BFS includes #include <gunrock/app/pr/pr_enactor.cuh> #include <gunrock/app/pr/pr_problem.cuh> #include <gunrock/app/pr/pr_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/page_rank.hpp> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::pr; /****************************************************************************** * Defines, constants, globals ******************************************************************************/ template <typename VertexId, typename Value> struct RankPair { VertexId vertex_id; Value page_rank; RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {} }; template<typename RankPair> bool PRCompare( RankPair elem1, RankPair elem2) { return elem1.page_rank > elem2.page_rank; } /****************************************************************************** * Housekeeping Routines 
******************************************************************************/ void Usage() { printf( "\ntest_pr <graph type> <graph type args> [--device=<device_index>] " "[--undirected] [--instrumented] [--quick=<0|1>] [--v]\n" "\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n" " edges from stdin (or from the optionally-specified file).\n" " --device=<device_index> Set GPU device for running the graph primitive.\n" " --undirected If set then treat the graph as undirected.\n" " --instrumented If set then kernels keep track of queue-search_depth\n" " and barrier duty (a relative indicator of load imbalance.)\n" " --quick If set will skip the CPU validation code. Default: 0\n" ); } /** * @brief Displays the PageRank result * * @param[in] node_id Node vertex Id * @param[in] rank Rank value for the node * @param[in] nodes Number of nodes in the graph. */ template<typename VertexId, typename Value, typename SizeT> void DisplaySolution(VertexId *node, Value *rank, SizeT nodes) { int top = (nodes < 10) ? nodes : 10; // at most top 10 ranked nodes printf("\nTop %d Ranked Vertices and PageRanks:\n", top); for (int i = 0; i < top; ++i) { printf("Vertex ID: %d, PageRank: %5f\n", node[i], rank[i]); } } /** * @brief Compares the equivalence of two arrays. If incorrect, print the location * of the first incorrect value appears, the incorrect value, and the reference * value. * * @tparam T datatype of the values being compared with. * @tparam SizeT datatype of the array length. * * @param[in] computed Vector of values to be compared. * @param[in] reference Vector of reference values * @param[in] len Vector length * @param[in] verbose Whether to print values around the incorrect one. * * \return Zero if two vectors are exactly the same, non-zero if there is any difference. 
*/ template <typename SizeT> int CompareResults_( float* computed, float* reference, SizeT len, bool verbose = true, bool quiet = false) { float THRESHOLD = 0.05f; int flag = 0; for (SizeT i = 0; i < len; i++) { // Use relative error rate here. bool is_right = true; if (fabs(computed[i]) < 0.01f && fabs(reference[i] - 1) < 0.01f) continue; if (fabs(computed[i] - 0.0) < 0.01f) { if (fabs(computed[i] - reference[i]) > THRESHOLD) is_right = false; } else { if (fabs((computed[i] - reference[i]) / reference[i]) > THRESHOLD) is_right = false; } if (!is_right && flag == 0) { if (!quiet) { printf("\nINCORRECT: [%lu]: ", (unsigned long) i); PrintValue<float>(computed[i]); PrintValue<float>(reference[i]); if (verbose) { printf("\nresult[..."); for (size_t j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<float>(computed[j]); printf(", "); } printf("...]"); printf("\nreference[..."); for (size_t j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<float>(reference[j]); printf(", "); } printf("...]"); } } flag += 1; } if (!is_right && flag > 0) flag += 1; } if (!quiet) { printf("\n"); if (!flag) { printf("CORRECT"); } } return flag; } /****************************************************************************** * PageRank Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference Page Rank implementation. 
* * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta delta for computing PR * @param[in] error error threshold * @param[in] max_iteration max iteration to go */ template < typename VertexId, typename Value, typename SizeT > void SimpleReferencePageRank( const Csr<VertexId, Value, SizeT> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false) { using namespace boost; // preparation typedef adjacency_list< vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph; Graph g; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j) { Graph::edge_descriptor e = add_edge(i, graph.column_indices[j], g).first; put(edge_index, g, e, i); } } // compute PageRank CpuTimer cpu_timer; cpu_timer.Start(); std::vector<Value> ranks(num_vertices(g)); page_rank(g, make_iterator_property_map( ranks.begin(), get(boost::vertex_index, g)), boost::graph::n_iterations(max_iteration)); cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); for (std::size_t i = 0; i < num_vertices(g); ++i) { rank[i] = ranks[i]; } // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * num_vertices(g)); for (int i = 0; i < num_vertices(g); ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank[i]; } std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >); for (int i = 0; i < num_vertices(g); ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = pr_list[i].page_rank; } free(pr_list); if (!quiet) { printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT 
* @tparam INSTRUMENT * @tparam DEBUG * @tparam SIZE_CHECK * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG, bool SIZE_CHECK > void RunTests(Info<VertexId, Value, SizeT> *info) { typedef PRProblem <VertexId, SizeT, Value > PrProblem; typedef PREnactor <PrProblem, INSTRUMENT, DEBUG, SIZE_CHECK > PrEnactor; // parse configurations from mObject info Csr<VertexId, Value, SizeT> *graph = info->csr_ptr; VertexId src = info->info["source_vertex"].get_int64(); bool undirected = info->info["undirected"].get_bool(); bool quiet_mode = info->info["quiet_mode"].get_bool(); bool quick_mode = info->info["quick_mode"].get_bool(); bool stream_from_host = info->info["stream_from_host"].get_bool(); int max_grid_size = info->info["max_grid_size"].get_int(); int num_gpus = info->info["num_gpus"].get_int(); int max_iteration = info->info["max_iteration"].get_int(); double max_queue_sizing = info->info["max_queue_sizing"].get_real(); double max_queue_sizing1 = info->info["max_queue_sizing1"].get_real(); double max_in_sizing = info->info["max_in_sizing"].get_real(); std::string partition_method = info->info["partition_method"].get_str(); double partition_factor = info->info["partition_factor"].get_real(); int partition_seed = info->info["partition_seed"].get_int(); int iterations = info->info["num_iteration"].get_int(); int traversal_mode = info->info["traversal_mode"].get_int(); std::string ref_filename = info->info["ref_filename"].get_str(); Value delta = info->info["delta"].get_real(); Value error = info->info["error"].get_real(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; cudaStream_t *streams = (cudaStream_t*)info->streams; // Allocate host-side array (for both 
reference and GPU-computed results) Value *ref_rank = new Value [graph->nodes]; Value *h_rank = new Value [graph->nodes]; VertexId *h_node_id = new VertexId[graph->nodes]; VertexId *ref_node_id = new VertexId[graph->nodes]; Value *ref_check = (quick_mode) ? NULL : ref_rank; size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&(org_size[gpu]), &dummy); } PrEnactor* enactor = new PrEnactor(num_gpus, gpu_idx); // enactor map PrProblem *problem = new PrProblem; // allocate problem on GPU util::GRError(problem->Init( stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "PR Problem Init failed", __FILE__, __LINE__); util::GRError(enactor->Init( context, problem, traversal_mode, max_grid_size), "PR Enactor Init failed", __FILE__, __LINE__); double elapsed = 0.0f; // perform PageRank CpuTimer cpu_timer; for (int iter = 0; iter < iterations; ++iter) { util::GRError(problem->Reset( src, delta, error, max_iteration, enactor->GetFrontierType(), max_queue_sizing), "PR Problem Data Reset Failed", __FILE__, __LINE__); util::GRError(enactor->Reset(), "PR Enactor Reset Reset failed", __FILE__, __LINE__); if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); util::GRError(enactor->Enact(traversal_mode), "PR Problem Enact Failed", __FILE__, __LINE__); cpu_timer.Stop(); if (!quiet_mode) { printf("--------------------------\n"); fflush(stdout); } elapsed += cpu_timer.ElapsedMillis(); } elapsed /= iterations; // copy out results util::GRError(problem->Extract(h_rank, h_node_id), "PR Problem Data Extraction Failed", __FILE__, __LINE__); if (!quiet_mode) { float total_pr = 0; for (int i = 0; i < graph->nodes; ++i) { total_pr += h_rank[i]; } printf("Total rank : %f\n", total_pr); } // compute reference CPU solution if (ref_check != NULL) { if (!quiet_mode) { 
printf("Computing reference value ...\n"); } SimpleReferencePageRank <VertexId, Value, SizeT>( *graph, ref_node_id, ref_check, delta, error, max_iteration, !undirected, quiet_mode); if (!quiet_mode) { printf("\n"); } } // Verify the result if (ref_check != NULL) { if (!quiet_mode) { printf("Validity Rank: "); } int errors_count = CompareResults_( h_rank, ref_check, graph->nodes, true, quiet_mode); if (errors_count > 0) { if (!quiet_mode) { printf("number of errors : %lld\n", (long long) errors_count); } } } if (!quiet_mode) { printf("\nFirst 40 labels of the GPU result."); // Display Solution DisplaySolution(h_node_id, h_rank, graph->nodes); } info->ComputeCommonStats( // compute running statistics enactor->enactor_stats.GetPointer(), elapsed); if (!quiet_mode) { info->DisplayStats(); // display collected statistics } info->CollectInfo(); // collected all the info and put into JSON mObject if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) {if (gpu != 0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus > 1) printf(" #keys%d", num_gpus); printf("\n"); double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor; } if (num_gpus > 1 && i != 0 ) for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_ = factor; } } if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); } // Clean up if (org_size ) { delete org_size ; org_size = NULL; } if (problem ) { delete problem ; problem = NULL; } if (enactor ) { delete enactor ; enactor = NULL; } if (ref_rank ) { delete[] ref_rank ; ref_rank = NULL; } if (ref_node_id) { delete[] ref_node_id; ref_node_id = NULL; } if (h_rank ) { delete[] h_rank ; h_rank = NULL; } if (h_node_id ) { delete[] h_node_id ; h_node_id = NULL; } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * @tparam DEBUG * * @param[in] info Pointer to mObject info. */ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT, bool DEBUG > void RunTests_size_check(Info<VertexId, Value, SizeT> *info) { if (info->info["size_check"].get_bool()) { RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true>(info); } else { RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(info); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam INSTRUMENT * * @param[in] info Pointer to mObject info. 
*/ template < typename VertexId, typename Value, typename SizeT, bool INSTRUMENT > void RunTests_debug(Info<VertexId, Value, SizeT> *info) { if (info->info["debug_mode"].get_bool()) { RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true>(info); } else { RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to mObject info. */ template < typename VertexId, typename Value, typename SizeT > void RunTests_instrumented(Info<VertexId, Value, SizeT> *info) { if (info->info["instrument"].get_bool()) { RunTests_debug<VertexId, Value, SizeT, true>(info); } else { RunTests_debug<VertexId, Value, SizeT, false>(info); } } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help")) { Usage(); return 1; } typedef int VertexId; // Use int as the vertex identifier typedef float Value; // Use float as the value type typedef int SizeT; // Use int as the graph size type Csr<VertexId, Value, SizeT> csr(false); // graph we process on Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>; // graph construction or generation related parameters info->info["undirected"] = true; // require undirected input graph info->info["edge_value"] = false; // don't need per edge weight values info->Init("PageRank", args, csr); // initialize Info structure RunTests_instrumented<VertexId, Value, SizeT>(info); // run test return 0; }
2f54f6cc2973a610ced29bd7c5fd2a4be5d1bff9.hip
// !!! This is a file automatically generated by hipify!!! // #include "terrain_manager.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" # include <cassert> # include <iostream> # include <fstream> # include <sstream> int main() { uint mesh_width = 2; uint mesh_height = 2; terrain_manager tm(mesh_height, mesh_width); tm.stair_terrain(glm::vec2(1,3),glm::vec2(0.2,1),glm::vec2(0,100)); // tm.export_obj("test_stair"); tm.gap_terrain(10, 3,glm::vec2(100,400)); // tm.export_obj("test_gap"); tm.wall_terrain(30,glm::vec2(1,2),glm::vec2(200,300)); tm.obstacle_terrain(1000,glm::vec2(1,3),glm::vec2(100,200)); // tm.uniforma_randomize(0.2); tm.perlin_randomize(); tm.new_export("test_mixed_perlin"); return 0; } //#include <stdio.h> // //class A{ // // int data; //public: // A() { data = 0;} // __host__ __device__ // void increment() { data++;} // __host__ __device__ // void print_data() { printf("data = %d\n", data);} //}; // //__global__ void test(A a){ // // a.increment(); // a.print_data(); //} // //int main(){ // // A h_a; // h_a.increment(); // h_a.print_data(); // test<<<1,1>>>(h_a); // hipDeviceSynchronize(); //}
2f54f6cc2973a610ced29bd7c5fd2a4be5d1bff9.cu
// #include "terrain_manager.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" # include <cassert> # include <iostream> # include <fstream> # include <sstream> int main() { uint mesh_width = 2; uint mesh_height = 2; terrain_manager tm(mesh_height, mesh_width); tm.stair_terrain(glm::vec2(1,3),glm::vec2(0.2,1),glm::vec2(0,100)); // tm.export_obj("test_stair"); tm.gap_terrain(10, 3,glm::vec2(100,400)); // tm.export_obj("test_gap"); tm.wall_terrain(30,glm::vec2(1,2),glm::vec2(200,300)); tm.obstacle_terrain(1000,glm::vec2(1,3),glm::vec2(100,200)); // tm.uniforma_randomize(0.2); tm.perlin_randomize(); tm.new_export("test_mixed_perlin"); return 0; } //#include <stdio.h> // //class A{ // // int data; //public: // A() { data = 0;} // __host__ __device__ // void increment() { data++;} // __host__ __device__ // void print_data() { printf("data = %d\n", data);} //}; // //__global__ void test(A a){ // // a.increment(); // a.print_data(); //} // //int main(){ // // A h_a; // h_a.increment(); // h_a.print_data(); // test<<<1,1>>>(h_a); // cudaDeviceSynchronize(); //}
0dfcb55aea2f7132a8ea3539c1a62eaafd52e3ac.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <cmath> #include <iostream> #include <algorithm> #include <complex> #include <boost/math/constants/constants.hpp> //#include <boost/numeric/ublas/vector.hpp> #include <boost/numeric/odeint.hpp> #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/io.hpp> #include <boost/numeric/ublas/matrix_proxy.hpp> #include "matrix.h" #include "mex.h" //using namespace std; using namespace boost::numeric::ublas; using namespace boost::numeric::odeint; const double pi = boost::math::constants::pi<double>(); // --------------------- Newly added ---------------------- #include <hip/hip_runtime.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define IDX2C(i,j,ld) (((j)*(ld))+(i)) void MatrixProdMatrix(const double* A,const double* B, double * C,int m,int r,int n,double* time_trans,double* time_cal){ /* Input: matrix A of size m*r matrix B of size r*m Output: matrix C = A*B time_cal: calculation time */ double* A_gpu; double* B_gpu; double* C_gpu; *time_trans = 0; *time_cal = 0; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; clock_t start = clock(); hipMalloc(&A_gpu,m*r*sizeof(double)); hipMalloc(&B_gpu,n*r*sizeof(double)); hipMalloc(&C_gpu,m*n*sizeof(double)); hipMemset(C_gpu,0,m*n*sizeof(double)); hipMemcpy(A_gpu,A,r*m*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(B_gpu,B,r*m*sizeof(double),hipMemcpyHostToDevice); clock_t end = clock(); *time_trans += (double) (end-start) / CLOCKS_PER_SEC * 1000.0; // calculation start = clock(); hipblasHandle_t handle; hipblasCreate(&handle); hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,m,n,r,alpha,A_gpu,m,B_gpu,r,beta,C_gpu,m); end = clock(); *time_cal += (double) (end-start) / CLOCKS_PER_SEC * 1000.0; //Transmission hipMemcpy(C,C_gpu,n*m*sizeof(double),hipMemcpyDeviceToHost); hipFree(C_gpu); hipFree(A_gpu); hipFree(B_gpu); } void mexFunction(int 
nlhs, mxArray *plhs[], int nlrs, const mxArray *prhs[]) { mwSize n,r,m; m = mxGetM(prhs[0]); r = mxGetN(prhs[0]); n = mxGetN(prhs[1]); double *A; double *B; // get matrix A and B A = (double *)mxGetPr(prhs[0]); B = (double *)mxGetPr(prhs[1]); double * C; plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL); plhs[2] = mxCreateDoubleMatrix(1, 1, mxREAL); C = mxGetPr(plhs[0]); double * time_cal = mxGetPr(plhs[1]); double * time_trans = mxGetPr(plhs[2]); MatrixProdMatrix(A,B,C,m,r,n,time_cal,time_trans); hipDeviceSynchronize(); return; }
0dfcb55aea2f7132a8ea3539c1a62eaafd52e3ac.cu
#include <vector> #include <cmath> #include <iostream> #include <algorithm> #include <complex> #include <boost/math/constants/constants.hpp> //#include <boost/numeric/ublas/vector.hpp> #include <boost/numeric/odeint.hpp> #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/io.hpp> #include <boost/numeric/ublas/matrix_proxy.hpp> #include "matrix.h" #include "mex.h" //using namespace std; using namespace boost::numeric::ublas; using namespace boost::numeric::odeint; const double pi = boost::math::constants::pi<double>(); // --------------------- Newly added ---------------------- #include <cuda_runtime.h> #include "cublas_v2.h" #include <curand.h> #include <curand_kernel.h> #define IDX2C(i,j,ld) (((j)*(ld))+(i)) void MatrixProdMatrix(const double* A,const double* B, double * C,int m,int r,int n,double* time_trans,double* time_cal){ /* Input: matrix A of size m*r matrix B of size r*m Output: matrix C = A*B time_cal: calculation time */ double* A_gpu; double* B_gpu; double* C_gpu; *time_trans = 0; *time_cal = 0; const double alf = 1; const double bet = 0; const double *alpha = &alf; const double *beta = &bet; clock_t start = clock(); cudaMalloc(&A_gpu,m*r*sizeof(double)); cudaMalloc(&B_gpu,n*r*sizeof(double)); cudaMalloc(&C_gpu,m*n*sizeof(double)); cudaMemset(C_gpu,0,m*n*sizeof(double)); cudaMemcpy(A_gpu,A,r*m*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(B_gpu,B,r*m*sizeof(double),cudaMemcpyHostToDevice); clock_t end = clock(); *time_trans += (double) (end-start) / CLOCKS_PER_SEC * 1000.0; // calculation start = clock(); cublasHandle_t handle; cublasCreate(&handle); cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,m,n,r,alpha,A_gpu,m,B_gpu,r,beta,C_gpu,m); end = clock(); *time_cal += (double) (end-start) / CLOCKS_PER_SEC * 1000.0; //Transmission cudaMemcpy(C,C_gpu,n*m*sizeof(double),cudaMemcpyDeviceToHost); cudaFree(C_gpu); cudaFree(A_gpu); cudaFree(B_gpu); } void mexFunction(int nlhs, mxArray *plhs[], int nlrs, const mxArray *prhs[]) { mwSize n,r,m; 
m = mxGetM(prhs[0]); r = mxGetN(prhs[0]); n = mxGetN(prhs[1]); double *A; double *B; // get matrix A and B A = (double *)mxGetPr(prhs[0]); B = (double *)mxGetPr(prhs[1]); double * C; plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL); plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL); plhs[2] = mxCreateDoubleMatrix(1, 1, mxREAL); C = mxGetPr(plhs[0]); double * time_cal = mxGetPr(plhs[1]); double * time_trans = mxGetPr(plhs[2]); MatrixProdMatrix(A,B,C,m,r,n,time_cal,time_trans); cudaDeviceSynchronize(); return; }
21aab88f822d7306d054fb97ef0437931d664879.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--blockDim=256 --gridDim=64 #include "common.h" // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint *cellStart, // output: cell start index uint *cellEnd, // output: cell end index float4 *sortedPos, // output: sorted positions float4 *sortedVel, // output: sorted velocities uint *gridParticleHash, // input: sorted grid hashes uint *gridParticleIndex,// input: sorted particle indices float4 *oldPos, // input: sorted position array float4 *oldVel, // input: sorted velocity array uint numParticles) { // Precondition follows from calcHashD.cu #define tid (blockIdx.x * blockDim.x + threadIdx.x) __requires(__implies(tid < numParticles, gridParticleIndex[tid] == tid)); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x+1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index-1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float4 pos = FETCH(oldPos, sortedIndex); // macro does either global read or texture fetch float4 vel = FETCH(oldVel, sortedIndex); // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; } }
21aab88f822d7306d054fb97ef0437931d664879.cu
//pass //--blockDim=256 --gridDim=64 #include "common.h" // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint *cellStart, // output: cell start index uint *cellEnd, // output: cell end index float4 *sortedPos, // output: sorted positions float4 *sortedVel, // output: sorted velocities uint *gridParticleHash, // input: sorted grid hashes uint *gridParticleIndex,// input: sorted particle indices float4 *oldPos, // input: sorted position array float4 *oldVel, // input: sorted velocity array uint numParticles) { // Precondition follows from calcHashD.cu #define tid (blockIdx.x * blockDim.x + threadIdx.x) __requires(__implies(tid < numParticles, gridParticleIndex[tid] == tid)); extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x+1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index-1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float4 pos = FETCH(oldPos, sortedIndex); // macro does either global read or texture fetch float4 vel = FETCH(oldVel, sortedIndex); // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; } }
56fb7b6859f2d7becf2481885ab5bc835b08f4dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> __global__ void helloWorld(){ } int main(int argc, char const *argv[]){ hipLaunchKernelGGL(( helloWorld), dim3(1),dim3(1) , 0, 0, ); return 0; }
56fb7b6859f2d7becf2481885ab5bc835b08f4dc.cu
#include <iostream> __global__ void helloWorld(){ } int main(int argc, char const *argv[]){ helloWorld<<< 1,1 >>>(); return 0; }
ce58fbc1605a44387a356b0062a697cf96b3e86e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <omp.h> #include <stdio.h> #include "AstroAccelerate/params.h" #include "device_corner_turn_kernel.cu" //{{{ Corner-turn void corner_turn(unsigned short *d_input, float *d_output, int nchans, int nsamp) { //{{{ Simple corner turn on the GPU int divisions_in_t = CT; int divisions_in_f = CF; int num_blocks_t = nsamp / divisions_in_t; int num_blocks_f = nchans / divisions_in_f; printf("\nCORNER TURN!"); printf("\n%d %d", nsamp, nchans); printf("\n%d %d", divisions_in_t, divisions_in_f); printf("\n%d %d", num_blocks_t, num_blocks_f); dim3 threads_per_block(divisions_in_t, divisions_in_f); dim3 num_blocks(num_blocks_t, num_blocks_f); double start_t, end_t; start_t = omp_get_wtime(); hipLaunchKernelGGL(( simple_corner_turn_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_input, d_output, nchans, nsamp); hipDeviceSynchronize(); hipLaunchKernelGGL(( swap), dim3(num_blocks), dim3(threads_per_block), 0, 0, d_input, d_output, nchans, nsamp); hipDeviceSynchronize(); end_t = omp_get_wtime(); float time = (float) ( end_t - start_t ); printf("\nPerformed CT: %f (GPU estimate)", time); printf("\nCT Gops based on %.2f ops per channel per tsamp: %f", 10.0, ( ( 10.0 * ( divisions_in_t * divisions_in_f * num_blocks_t * num_blocks_f ) ) / ( time ) ) / 1000000000.0); printf("\nCT Device memory bandwidth in GB/s: %f", ( ( sizeof(float) + sizeof(unsigned short) ) * ( divisions_in_t * divisions_in_f * num_blocks_t * num_blocks_f ) ) / ( time ) / 1000000000.0); //hipMemcpy(d_input, d_output, inputsize, hipMemcpyDeviceToDevice); //}}} } //}}}
ce58fbc1605a44387a356b0062a697cf96b3e86e.cu
#include <omp.h> #include <stdio.h> #include "AstroAccelerate/params.h" #include "device_corner_turn_kernel.cu" //{{{ Corner-turn void corner_turn(unsigned short *d_input, float *d_output, int nchans, int nsamp) { //{{{ Simple corner turn on the GPU int divisions_in_t = CT; int divisions_in_f = CF; int num_blocks_t = nsamp / divisions_in_t; int num_blocks_f = nchans / divisions_in_f; printf("\nCORNER TURN!"); printf("\n%d %d", nsamp, nchans); printf("\n%d %d", divisions_in_t, divisions_in_f); printf("\n%d %d", num_blocks_t, num_blocks_f); dim3 threads_per_block(divisions_in_t, divisions_in_f); dim3 num_blocks(num_blocks_t, num_blocks_f); double start_t, end_t; start_t = omp_get_wtime(); simple_corner_turn_kernel<<<num_blocks, threads_per_block>>>(d_input, d_output, nchans, nsamp); cudaDeviceSynchronize(); swap<<<num_blocks, threads_per_block>>>(d_input, d_output, nchans, nsamp); cudaDeviceSynchronize(); end_t = omp_get_wtime(); float time = (float) ( end_t - start_t ); printf("\nPerformed CT: %f (GPU estimate)", time); printf("\nCT Gops based on %.2f ops per channel per tsamp: %f", 10.0, ( ( 10.0 * ( divisions_in_t * divisions_in_f * num_blocks_t * num_blocks_f ) ) / ( time ) ) / 1000000000.0); printf("\nCT Device memory bandwidth in GB/s: %f", ( ( sizeof(float) + sizeof(unsigned short) ) * ( divisions_in_t * divisions_in_f * num_blocks_t * num_blocks_f ) ) / ( time ) / 1000000000.0); //cudaMemcpy(d_input, d_output, inputsize, cudaMemcpyDeviceToDevice); //}}} } //}}}
be9834c961b5a85d4428539be02a78fee857276f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void square(float* d_dout, float* d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_dout[idx] = f * f; } int main(int argc, char* argv[]){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for (int i=0; i < ARRAY_SIZE; i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float* d_in; float* d_out; hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); // 1, 64 threads, GPU data hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); for (int i = 0; i < ARRAY_SIZE; i++){ printf("%f \n", h_out[i]); } hipFree(d_in); hipFree(d_out); return 0; }
be9834c961b5a85d4428539be02a78fee857276f.cu
#include <stdio.h> __global__ void square(float* d_dout, float* d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_dout[idx] = f * f; } int main(int argc, char* argv[]){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for (int i=0; i < ARRAY_SIZE; i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float* d_in; float* d_out; cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); square<<<1, ARRAY_SIZE>>>(d_out, d_in); // 1, 64 threads, GPU data cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); for (int i = 0; i < ARRAY_SIZE; i++){ printf("%f \n", h_out[i]); } cudaFree(d_in); cudaFree(d_out); return 0; }
a64cd44b33fd6a823942adec864a4cfd965f3041.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_computeModelMany2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int sizeImage = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double *amplitude = NULL; hipMalloc(&amplitude, XSIZE*YSIZE); double *background = NULL; hipMalloc(&background, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_computeModelMany2), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_computeModelMany2), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_computeModelMany2), 
dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a64cd44b33fd6a823942adec864a4cfd965f3041.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_computeModelMany2.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int sizeImage = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double *amplitude = NULL; cudaMalloc(&amplitude, XSIZE*YSIZE); double *background = NULL; cudaMalloc(&background, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_computeModelMany2<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_computeModelMany2<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_computeModelMany2<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - 
start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a2a70f3c10d0bad18b4add602d22d4e17e71b28b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void indirect_rw_kernel_gpu( const double *variables_a, const double *variables_b, const double *edge_weight, double *fluxes_a, double *fluxes_b) { double ex = edge_weight[0]; double ey = edge_weight[1]; double ez = edge_weight[2]; double p_a, pe_a; double3 momentum_a; p_a = variables_a[VAR_DENSITY]; momentum_a.x = variables_a[VAR_MOMENTUM+0]; momentum_a.y = variables_a[VAR_MOMENTUM+1]; momentum_a.z = variables_a[VAR_MOMENTUM+2]; pe_a = variables_a[VAR_DENSITY_ENERGY]; double p_b, pe_b; double3 momentum_b; p_b = variables_b[VAR_DENSITY]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; double p_a_val = p_b + ex; double pe_a_val = pe_b + ey; double mx_a_val = momentum_b.x + ez; double my_a_val = momentum_b.y; double mz_a_val = momentum_b.z; double p_b_val = p_a; double pe_b_val = pe_a; double mx_b_val = momentum_a.x; double my_b_val = momentum_a.y; double mz_b_val = momentum_a.z; fluxes_a[VAR_DENSITY] += p_a_val; fluxes_a[VAR_MOMENTUM+0] += mx_a_val; fluxes_a[VAR_MOMENTUM+1] += my_a_val; fluxes_a[VAR_MOMENTUM+2] += mz_a_val; fluxes_a[VAR_DENSITY_ENERGY] += pe_a_val; fluxes_b[VAR_DENSITY] += p_b_val; fluxes_b[VAR_MOMENTUM+0] += mx_b_val; fluxes_b[VAR_MOMENTUM+1] += my_b_val; fluxes_b[VAR_MOMENTUM+2] += mz_b_val; fluxes_b[VAR_DENSITY_ENERGY] += pe_b_val; } // CUDA kernel function __global__ void op_cuda_indirect_rw_kernel( const double *__restrict ind_arg0, double *__restrict ind_arg1, const int *__restrict opDat0Map, const double *__restrict arg2, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg3_l[5]; double arg4_l[5]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if 
(blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; if (n<nelem) { //initialise local variables for ( int d=0; d<5; d++ ){ arg3_l[d] = ZERO_double; } for ( int d=0; d<5; d++ ){ arg4_l[d] = ZERO_double; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; //user-supplied kernel call indirect_rw_kernel_gpu(ind_arg0+map0idx*5, ind_arg0+map1idx*5, arg2+(n+offset_b)*3, arg3_l, arg4_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg3_l[0] += ind_arg1[0+map0idx*5]; arg3_l[1] += ind_arg1[1+map0idx*5]; arg3_l[2] += ind_arg1[2+map0idx*5]; arg3_l[3] += ind_arg1[3+map0idx*5]; arg3_l[4] += ind_arg1[4+map0idx*5]; ind_arg1[0+map0idx*5] = arg3_l[0]; ind_arg1[1+map0idx*5] = arg3_l[1]; ind_arg1[2+map0idx*5] = arg3_l[2]; ind_arg1[3+map0idx*5] = arg3_l[3]; ind_arg1[4+map0idx*5] = arg3_l[4]; arg4_l[0] += ind_arg1[0+map1idx*5]; arg4_l[1] += ind_arg1[1+map1idx*5]; arg4_l[2] += ind_arg1[2+map1idx*5]; arg4_l[3] += ind_arg1[3+map1idx*5]; arg4_l[4] += ind_arg1[4+map1idx*5]; ind_arg1[0+map1idx*5] = arg4_l[0]; ind_arg1[1+map1idx*5] = arg4_l[1]; ind_arg1[2+map1idx*5] = arg4_l[2]; ind_arg1[3+map1idx*5] = arg4_l[3]; ind_arg1[4+map1idx*5] = arg4_l[4]; } __syncthreads(); } } } //host stub function void op_par_loop_indirect_rw_kernel(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double 
cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(12); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[12].name = name; OP_kernels[12].count += 1; int ninds = 2; int inds[5] = {0,0,-1,1,1}; if (OP_diags>2) { printf(" kernel routine with indirection: indirect_rw_kernel\n"); } //get plan #ifdef OP_PART_SIZE_12 int part_size = OP_PART_SIZE_12; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_12 int nthread = OP_BLOCK_SIZE_12; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { hipLaunchKernelGGL(( op_cuda_indirect_rw_kernel), dim3(nblocks),dim3(nthread), 0, 0, (double *)arg0.data_d, (double *)arg3.data_d, arg0.map_data_d, (double*)arg2.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[12].transfer += Plan->transfer; OP_kernels[12].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[12].time += wall_t2 - wall_t1; }
a2a70f3c10d0bad18b4add602d22d4e17e71b28b.cu
// // auto-generated by op2.py // //user function __device__ void indirect_rw_kernel_gpu( const double *variables_a, const double *variables_b, const double *edge_weight, double *fluxes_a, double *fluxes_b) { double ex = edge_weight[0]; double ey = edge_weight[1]; double ez = edge_weight[2]; double p_a, pe_a; double3 momentum_a; p_a = variables_a[VAR_DENSITY]; momentum_a.x = variables_a[VAR_MOMENTUM+0]; momentum_a.y = variables_a[VAR_MOMENTUM+1]; momentum_a.z = variables_a[VAR_MOMENTUM+2]; pe_a = variables_a[VAR_DENSITY_ENERGY]; double p_b, pe_b; double3 momentum_b; p_b = variables_b[VAR_DENSITY]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; double p_a_val = p_b + ex; double pe_a_val = pe_b + ey; double mx_a_val = momentum_b.x + ez; double my_a_val = momentum_b.y; double mz_a_val = momentum_b.z; double p_b_val = p_a; double pe_b_val = pe_a; double mx_b_val = momentum_a.x; double my_b_val = momentum_a.y; double mz_b_val = momentum_a.z; fluxes_a[VAR_DENSITY] += p_a_val; fluxes_a[VAR_MOMENTUM+0] += mx_a_val; fluxes_a[VAR_MOMENTUM+1] += my_a_val; fluxes_a[VAR_MOMENTUM+2] += mz_a_val; fluxes_a[VAR_DENSITY_ENERGY] += pe_a_val; fluxes_b[VAR_DENSITY] += p_b_val; fluxes_b[VAR_MOMENTUM+0] += mx_b_val; fluxes_b[VAR_MOMENTUM+1] += my_b_val; fluxes_b[VAR_MOMENTUM+2] += mz_b_val; fluxes_b[VAR_DENSITY_ENERGY] += pe_b_val; } // CUDA kernel function __global__ void op_cuda_indirect_rw_kernel( const double *__restrict ind_arg0, double *__restrict ind_arg1, const int *__restrict opDat0Map, const double *__restrict arg2, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg3_l[5]; double arg4_l[5]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and 
shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; if (n<nelem) { //initialise local variables for ( int d=0; d<5; d++ ){ arg3_l[d] = ZERO_double; } for ( int d=0; d<5; d++ ){ arg4_l[d] = ZERO_double; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; //user-supplied kernel call indirect_rw_kernel_gpu(ind_arg0+map0idx*5, ind_arg0+map1idx*5, arg2+(n+offset_b)*3, arg3_l, arg4_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg3_l[0] += ind_arg1[0+map0idx*5]; arg3_l[1] += ind_arg1[1+map0idx*5]; arg3_l[2] += ind_arg1[2+map0idx*5]; arg3_l[3] += ind_arg1[3+map0idx*5]; arg3_l[4] += ind_arg1[4+map0idx*5]; ind_arg1[0+map0idx*5] = arg3_l[0]; ind_arg1[1+map0idx*5] = arg3_l[1]; ind_arg1[2+map0idx*5] = arg3_l[2]; ind_arg1[3+map0idx*5] = arg3_l[3]; ind_arg1[4+map0idx*5] = arg3_l[4]; arg4_l[0] += ind_arg1[0+map1idx*5]; arg4_l[1] += ind_arg1[1+map1idx*5]; arg4_l[2] += ind_arg1[2+map1idx*5]; arg4_l[3] += ind_arg1[3+map1idx*5]; arg4_l[4] += ind_arg1[4+map1idx*5]; ind_arg1[0+map1idx*5] = arg4_l[0]; ind_arg1[1+map1idx*5] = arg4_l[1]; ind_arg1[2+map1idx*5] = arg4_l[2]; ind_arg1[3+map1idx*5] = arg4_l[3]; ind_arg1[4+map1idx*5] = arg4_l[4]; } __syncthreads(); } } } //host stub function void op_par_loop_indirect_rw_kernel(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4){ int nargs = 5; op_arg args[5]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(12); op_timers_core(&cpu_t1, &wall_t1); 
OP_kernels[12].name = name; OP_kernels[12].count += 1; int ninds = 2; int inds[5] = {0,0,-1,1,1}; if (OP_diags>2) { printf(" kernel routine with indirection: indirect_rw_kernel\n"); } //get plan #ifdef OP_PART_SIZE_12 int part_size = OP_PART_SIZE_12; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_12 int nthread = OP_BLOCK_SIZE_12; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { op_cuda_indirect_rw_kernel<<<nblocks,nthread>>>( (double *)arg0.data_d, (double *)arg3.data_d, arg0.map_data_d, (double*)arg2.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[12].transfer += Plan->transfer; OP_kernels[12].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[12].time += wall_t2 - wall_t1; }
61e00b77bba31c9e43d9c21114e1d6543d9f9862.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void KernelLBSSimple(int aCount, const int* b_global, int bCount, int* indices_global) { __shared__ int data_shared[NT * VT]; int tid = threadIdx.x; // Load bCount elements from B into data_shared. int x[VT]; #pragma unroll for(int i = 0; i < VT; ++i) { int index = NT * i + tid; if(index < bCount) x[i] = b_global[index]; } #pragma unroll for(int i = 0; i < VT; ++i) data_shared[NT * i + tid] = x[i]; __syncthreads(); // Each thread searches for its Merge Path partition. int diag = VT * tid; int begin = max(0, diag - bCount); int end = min(diag, aCount); while(begin < end) { int mid = (begin + end)>> 1; int aKey = mid; int bKey = data_shared[diag - 1 - mid]; bool pred = aKey < bKey; if(pred) begin = mid + 1; else end = mid; } int mp = begin; // Sequentially search, comparing indices a to elements data_shared[b]. // Store indices for A in the right-side of the shared memory array. // This lets us complete the search in just a single pass, rather than // the search and compact passes of the generalized vectorized sorted // search function. int a = mp; int b = diag - a; #pragma unroll for(int i = 0; i < VT; ++i) { bool p; if(b >= bCount) p = true; else if(a >= aCount) p = false; else p = a < data_shared[b]; if(p) // If a < data_shared[b], advance A and store the index b - 1. data_shared[bCount + a++] = b - 1; else // Just advance b. ++b; } __syncthreads(); // Store all indices to global memory. for(int i = tid; i < aCount; i += NT) indices_global[i] = data_shared[bCount + i]; }
61e00b77bba31c9e43d9c21114e1d6543d9f9862.cu
#include "includes.h" __global__ void KernelLBSSimple(int aCount, const int* b_global, int bCount, int* indices_global) { __shared__ int data_shared[NT * VT]; int tid = threadIdx.x; // Load bCount elements from B into data_shared. int x[VT]; #pragma unroll for(int i = 0; i < VT; ++i) { int index = NT * i + tid; if(index < bCount) x[i] = b_global[index]; } #pragma unroll for(int i = 0; i < VT; ++i) data_shared[NT * i + tid] = x[i]; __syncthreads(); // Each thread searches for its Merge Path partition. int diag = VT * tid; int begin = max(0, diag - bCount); int end = min(diag, aCount); while(begin < end) { int mid = (begin + end)>> 1; int aKey = mid; int bKey = data_shared[diag - 1 - mid]; bool pred = aKey < bKey; if(pred) begin = mid + 1; else end = mid; } int mp = begin; // Sequentially search, comparing indices a to elements data_shared[b]. // Store indices for A in the right-side of the shared memory array. // This lets us complete the search in just a single pass, rather than // the search and compact passes of the generalized vectorized sorted // search function. int a = mp; int b = diag - a; #pragma unroll for(int i = 0; i < VT; ++i) { bool p; if(b >= bCount) p = true; else if(a >= aCount) p = false; else p = a < data_shared[b]; if(p) // If a < data_shared[b], advance A and store the index b - 1. data_shared[bCount + a++] = b - 1; else // Just advance b. ++b; } __syncthreads(); // Store all indices to global memory. for(int i = tid; i < aCount; i += NT) indices_global[i] = data_shared[bCount + i]; }
adfff7177939663a922b3a96c7fc708a09848efd.hip
// !!! This is a file automatically generated by hipify!!! #include "ExecutionRD.h" #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include "device_launch_parameters.h" #include <rocblas.h> #include <stdio.h> #include <hipfft.h> hipComplex *d_vecX; hipComplex *d_ref; hipComplex *d_out; //Kernel void FFT(Complex *d_input1, Complex *d_input2); void IFFT(Complex *d_input); void matrixMul(Complex *d_buf, Complex * d_ref, Complex *Oput); static __global__ void devmatrixMul(hipComplex *X, hipComplex *Y, hipComplex *Oput); static __device__ __host__ inline hipComplex ComplexMul(hipComplex a, hipComplex b); static __device__ __host__ inline hipComplex ComplexConjugate(hipComplex a); void PauseCompression(Complex *h_buf, Complex *h_ref, Complex *Oput, Complex *test) { hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); { hipError_t error; int size_d_vecX = sizeof(Complex)* BATCH * LENGTH; int size_d_ref = sizeof(Complex)* BATCH * LENGTH; int size_d_out = sizeof(Complex)* BATCH * LENGTH; hipMalloc((void**)&d_vecX, size_d_vecX); hipMalloc((void**)&d_ref, size_d_ref); hipMalloc((void**)&d_out, size_d_out); error = hipMemcpy(d_vecX, h_buf, size_d_vecX, hipMemcpyHostToDevice); error = hipMemcpy(d_ref, h_ref, size_d_ref, hipMemcpyHostToDevice); FFT(d_ref, d_vecX); matrixMul(d_ref, d_vecX, d_out); IFFT(d_out); //hipMemcpy(test, d_ref, size_d_ref, hipMemcpyDeviceToHost); hipMemcpy(Oput, d_out, size_d_out, hipMemcpyDeviceToHost); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Pause Compression CUDA runtime is %f sec\n", time / 1e3); hipEventDestroy(start); hipEventDestroy(stop); //hipFree hipFree(d_vecX); hipFree(d_ref); hipFree(d_out); } void FFT(Complex *d_input1, Complex *d_input2) { hipfftHandle plan; // parameters #define RANK 1 int n[RANK] = { NX }; int istride = NX, ostride = NX; int idist = 1, odist = 1; int *inembed = 
NULL, *onembed = NULL; hipfftPlanMany(&plan, RANK, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, BATCH); hipfftExecC2C(plan, (hipfftComplex*)d_input1, (hipfftComplex*)d_input1, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex*)d_input2, (hipfftComplex*)d_input2, HIPFFT_FORWARD); hipDeviceSynchronize(); hipfftDestroy(plan); } void matrixMul(Complex *d_buf, Complex * d_ref, Complex *Oput) { dim3 dimBlock(1024, 1); int dimGrid = BATCH * LENGTH / 1024; devmatrixMul << <dimGrid, dimBlock >> >(d_ref, d_buf, Oput); } static __global__ void devmatrixMul(hipComplex *X, hipComplex *Y, hipComplex *Oput) { //block index int bx = blockIdx.x; //threads index int tx = threadIdx.x; int xBegin = bx * dimBlock_x; Oput[xBegin + tx] = ComplexMul(X[xBegin + tx], ComplexConjugate(Y[xBegin + tx])); __syncthreads(); } void IFFT(Complex *d_input) { hipfftHandle plan; // parameters #define RANK 1 int n[RANK] = { NX }; int istride = NX, ostride = NX; int idist = 1, odist = 1; int *inembed = NULL, *onembed = NULL; hipfftPlanMany(&plan, RANK, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, BATCH); hipfftExecC2C(plan, (hipfftComplex*)d_input, (hipfftComplex*)d_input, HIPFFT_BACKWARD); hipDeviceSynchronize(); hipfftDestroy(plan); } static __device__ __host__ inline hipComplex ComplexMul(hipComplex a, hipComplex b) { hipComplex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } static __device__ __host__ inline hipComplex ComplexConjugate(hipComplex a) { hipComplex b; b.x = a.x; b.y = -a.y; return b; }
adfff7177939663a922b3a96c7fc708a09848efd.cu
#include "ExecutionRD.h" #include "cuda_runtime.h" #include <cuda.h> #include <device_functions.h> #include "device_launch_parameters.h" #include <cublas.h> #include <stdio.h> #include <cufft.h> cuComplex *d_vecX; cuComplex *d_ref; cuComplex *d_out; //Kernel void FFT(Complex *d_input1, Complex *d_input2); void IFFT(Complex *d_input); void matrixMul(Complex *d_buf, Complex * d_ref, Complex *Oput); static __global__ void devmatrixMul(cuComplex *X, cuComplex *Y, cuComplex *Oput); static __device__ __host__ inline cuComplex ComplexMul(cuComplex a, cuComplex b); static __device__ __host__ inline cuComplex ComplexConjugate(cuComplex a); void PauseCompression(Complex *h_buf, Complex *h_ref, Complex *Oput, Complex *test) { cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); { cudaError_t error; int size_d_vecX = sizeof(Complex)* BATCH * LENGTH; int size_d_ref = sizeof(Complex)* BATCH * LENGTH; int size_d_out = sizeof(Complex)* BATCH * LENGTH; cudaMalloc((void**)&d_vecX, size_d_vecX); cudaMalloc((void**)&d_ref, size_d_ref); cudaMalloc((void**)&d_out, size_d_out); error = cudaMemcpy(d_vecX, h_buf, size_d_vecX, cudaMemcpyHostToDevice); error = cudaMemcpy(d_ref, h_ref, size_d_ref, cudaMemcpyHostToDevice); FFT(d_ref, d_vecX); matrixMul(d_ref, d_vecX, d_out); IFFT(d_out); //cudaMemcpy(test, d_ref, size_d_ref, cudaMemcpyDeviceToHost); cudaMemcpy(Oput, d_out, size_d_out, cudaMemcpyDeviceToHost); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Pause Compression CUDA runtime is %f sec\n", time / 1e3); cudaEventDestroy(start); cudaEventDestroy(stop); //cudaFree cudaFree(d_vecX); cudaFree(d_ref); cudaFree(d_out); } void FFT(Complex *d_input1, Complex *d_input2) { cufftHandle plan; // parameters #define RANK 1 int n[RANK] = { NX }; int istride = NX, ostride = NX; int idist = 1, odist = 1; int *inembed = NULL, *onembed = NULL; cufftPlanMany(&plan, RANK, n, inembed, 
istride, idist, onembed, ostride, odist, CUFFT_C2C, BATCH); cufftExecC2C(plan, (cufftComplex*)d_input1, (cufftComplex*)d_input1, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex*)d_input2, (cufftComplex*)d_input2, CUFFT_FORWARD); cudaDeviceSynchronize(); cufftDestroy(plan); } void matrixMul(Complex *d_buf, Complex * d_ref, Complex *Oput) { dim3 dimBlock(1024, 1); int dimGrid = BATCH * LENGTH / 1024; devmatrixMul << <dimGrid, dimBlock >> >(d_ref, d_buf, Oput); } static __global__ void devmatrixMul(cuComplex *X, cuComplex *Y, cuComplex *Oput) { //block index int bx = blockIdx.x; //threads index int tx = threadIdx.x; int xBegin = bx * dimBlock_x; Oput[xBegin + tx] = ComplexMul(X[xBegin + tx], ComplexConjugate(Y[xBegin + tx])); __syncthreads(); } void IFFT(Complex *d_input) { cufftHandle plan; // parameters #define RANK 1 int n[RANK] = { NX }; int istride = NX, ostride = NX; int idist = 1, odist = 1; int *inembed = NULL, *onembed = NULL; cufftPlanMany(&plan, RANK, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, BATCH); cufftExecC2C(plan, (cufftComplex*)d_input, (cufftComplex*)d_input, CUFFT_INVERSE); cudaDeviceSynchronize(); cufftDestroy(plan); } static __device__ __host__ inline cuComplex ComplexMul(cuComplex a, cuComplex b) { cuComplex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } static __device__ __host__ inline cuComplex ComplexConjugate(cuComplex a) { cuComplex b; b.x = a.x; b.y = -a.y; return b; }
6052f590a57b1678a766d7ac14a8c53875cac841.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/IndexingUtils.h> #include <ATen/ATen.h> #include <ATen/ceil_div.h> #include <ATen/NativeFunctions.h> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/Resize.h> #include <ATen/AccumulateType.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/cub.h> #include <c10/util/irange.h> #include <c10/core/QScheme.h> #include <limits> #include <c10/macros/Macros.h> namespace { template <typename scalar_t, int SZ> __global__ void indexing_backward_kernel( int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) { //numel is total number of flattened indices, not expanded to dimensions that are not indexed. //stride is the cumulative size of the not-indexed last dimensions //stride_before is the stride of the dimension immediately preceding first indexed dimension //if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case //outer_dim is number of elements in the first unindexed dimensions using accscalar_t = at::acc_type<scalar_t, true>; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same destination index as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values processed by each thread (grain size) for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){ int64_t idx = blockIdx.x * blockDim.y + threadIdx.y; if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){ do { int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; // if not accumulate, we only keep the last duplicate index so skip those before it if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) { idx++; continue; } const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before; const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride; const accscalar_t scale = (accscalar_t)1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; while (start_feature < stride) { #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); if (accumulate) { weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { if (accumulate) { weight[ii] += gradient[ii] * scale; } else { weight[ii] = gradient[ii] * scale; } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int64_t feature_dim = start_feature + ii * C10_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } start_feature += gridDim.y * blockDim.x * SZ; } idx++; } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]); } } } } namespace at { namespace native { static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) { //we don't need to check range in backward - if there were out of bounds indices forward should already have 
errored out if (index.numel() != 0 && check_range) { auto max_idx = index.max().item<int64_t>(); auto min_idx = index.min().item<int64_t>(); if (max_idx >= dim_size) { TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } if (min_idx < -dim_size) { TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size); } } return index.remainder(dim_size); } static std::vector<int64_t> computeLinearStride(const Tensor & tensor) { // computes the stride as if tensor were contiguous auto sizes = tensor.sizes(); std::vector<int64_t> stride(tensor.dim()); stride[tensor.dim() - 1] = 1; std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>()); return stride; } static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) { auto strides = computeLinearStride(src); const auto& device = src.options().device(); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at // this point. We also compute the number of dimensions before and after that // are not being index. 
Tensor linearIndex; int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0; for (const auto i: c10::irange(src.dim())) { if (indices[i].defined()) { // Cast index to the longType matching src's device // This allows us to support ie indexing a cuda tensor with a cpu tensor Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device); if (linearIndex.defined()) { linearIndex += index; } else { linearIndex = index; if (i>0) { strideBefore = src.stride(i-1); // stride after undefined dimensions } } } else if (linearIndex.defined()) { emptyAfter++; nElemAfter *= src.size(i); } else { emptyBefore++; nElemBefore *= src.size(i); } } return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter); } static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) { checkIndexTensorTypes(orig); // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors auto indices = expandTensors(self, orig); // next broadcast all index tensors together indices = expand_outplace(indices); // add missing null Tensors so that it matches self.dim() while (indices.size() < (size_t)self.dim()) { indices.emplace_back(); } // if the non-null indices are not all adjacent, transpose self and indices // together so that they're adjacent at the front std::vector<int64_t> inversePerm; if (!hasContiguousSubspace(indices)) { std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices); } int64_t nElemBefore, strideBefore, nElemAfter; Tensor linearIndex; std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range); return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm); } void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor 
&sorted_indices, int64_t num_indices); namespace { int64_t largestIndex(const Tensor &self) { int64_t result = 0; for (const auto i: c10::irange(self.dim())) { result += (self.sizes()[i] - 1) * self.strides()[i]; } return result; } void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) { if (indices.size() > (size_t)self.dim()) { TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")"); } if (!self.is_contiguous()) { self = self.contiguous(); } Tensor linearIndex, src, expandedValue = value; int64_t nElemBefore, strideBefore, sliceSize; std::vector<int64_t> inversePerm; std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe); int64_t num_indices = linearIndex.numel(); if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) { auto expanded_size = at::DimVector(expandedValue.sizes()); auto size1 = expandedValue.sizes(); auto size2 = linearIndex.sizes(); if (are_expandable(size1, size2)) { expanded_size = infer_size_dimvector(size1, size2); } if (nElemBefore > 1) { expanded_size.insert(expanded_size.begin(), nElemBefore); } expandedValue = expandedValue.expand(expanded_size); } expandedValue = expandedValue.contiguous(); if (num_indices > 0 && sliceSize > 0) { const bool permuted = !src.is_contiguous(); auto src_ = permuted ? 
src.contiguous() : src; linearIndex = linearIndex.reshape(-1); auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT); const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); linearIndex.divide_(sliceSize, "trunc"); // cub on CUDA <= 11.2 have a bug that for small sizes // cub's sort can be much slower than thrust's merge sort // this bug is fixed in CUDA 11.3 #if (defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11030) || defined(USE_ROCM) if (num_indices < 50000) { index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices); } else #endif { // Sort the inputs into sorted with the corresponding indices auto range = at::arange(num_indices, linearIndex.options()); // linearIndex can not be negative, and we take advantage of this // fact to sort on less bits for better performance. int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize); cuda::cub::radix_sort_pairs( linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), num_indices, false, 0, nbits); } TORCH_INTERNAL_ASSERT( linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(), "number of flattened indices did not match number of elements in the value tensor: ", linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel()); const int UNROLL = 4; const int indices_per_block = 4; dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block), std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))), ::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2])); dim3 block(C10_WARP_SIZE, indices_per_block); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, expandedValue.scalar_type(), 
"indexing_backward", [&] { hipLaunchKernelGGL(( indexing_backward_kernel<scalar_t, UNROLL>), dim3(grid), dim3(block), 0, stream, sorted_indices.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(), expandedValue.data_ptr<scalar_t>(), src_.data_ptr<scalar_t>(), num_indices, sliceSize, strideBefore, nElemBefore, accumulate); C10_HIP_KERNEL_LAUNCH_CHECK(); }); if (permuted) { self.copy_(src_.permute(inversePerm)); } } } REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel); } //anonymous // Check tensor dimensions for index operations, and return the slice size. static ptrdiff_t getSliceSize(const Tensor & dst, int dim, const Tensor & index, const Tensor & src) { const auto dstDims = dst.dim(); const auto srcDims = src.dim(); TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar"); ptrdiff_t dstSliceSize = 1; TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds"); for (const auto d: c10::irange(dstDims)) { if (d != dim) { dstSliceSize *= dst.size(d); } } TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds"); TORCH_CHECK(index.numel() == src.size(dim), "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (const auto d: c10::irange(srcDims)) { if (d != dim) { srcSliceSize *= src.size(d); if (!mismatch && dst.size(d) != src.size(d)) mismatch = true; } } TORCH_CHECK(dstSliceSize == srcSliceSize, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { TORCH_WARN_ONCE( "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } return dstSliceSize; } // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. 
// This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexAddLargeIndex kernel is a better choice to increase // parallelism. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) { // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. 
// This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexAddSmallIndex kernel is a better choice to reduce memory // accesses. template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstAddDim, int srcAddDim, IndexType totalSize, IndexType innerSize, int64_t dstAddDimSize, T alpha) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType srcIndex, elementInSlice; if (IndexIsMajor) { srcIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; srcIndex = linearIndex % innerSize; } // Lua indices begin at 1 IndexType dstIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)]; CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstAddDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcAddDim]; gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha); } } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). 
// // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. template <typename scalar_t> bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). unsigned int sliceStride = info.strides[sliceDim]; for (const auto i: c10::irange(info.dims)) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) { dim = maybe_wrap_dim(dim, self.dim()); TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4}; checkAllSameGPU(__func__, {self_arg, index_arg, source_arg}); TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector"); TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "index_add_(): Expected dtype int32/int64 for index"); TORCH_CHECK(self.scalar_type() == source.scalar_type(), "index_add_(): self and source must have the same scalar type"); TORCH_CHECK(dim == 0 || dim < source.dim(), "index_add_(): Indexing dim ", dim, " is out of bounds of tensor"); TORCH_CHECK(index.numel() == (source.dim() == 0 ? 
1 : source.size(dim)), "index_add_(): Number of indices should be equal to self.size(dim)"); at::assert_no_internal_overlap(self); at::assert_no_overlap(self, index); at::assert_no_overlap(self, source); // Scalars are treated as 1-d tensor Tensor self_ = (self.dim() == 0) ? self.view(1) : self; Tensor source_ = (source.dim() == 0) ? source.view(1) : source; TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims"); TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" ); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims"); at::assert_no_internal_overlap(self); at::assert_no_partial_overlap(self, index); at::assert_no_partial_overlap(self, source); if (globalContext().deterministicAlgorithms()){ torch::List<c10::optional<Tensor>> indices; indices.reserve(dim + 1); for (const auto i: c10::irange(dim)) { indices.emplace_back(); } indices.emplace_back(index.to(at::kLong)); return self.index_put_(indices, source * alpha, true); } // The `source` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of index we are choosing, which is the total size // of the tensor `index`. 
ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_); ptrdiff_t sourceTotalSize = source.numel(); int64_t selfAddDimSize = self_.size(dim); ptrdiff_t numIndex = index.numel(); if (sliceSize == 0) { return self; } const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); bool indContig = index.is_contiguous(); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ selfInfo, sourceInfo, indexInfo, \ selfAddDim, sourceAddDim, sourceTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndex, \ selfAddDimSize, alpha_value); \ C10_HIP_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(sourceTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(source) && cuda::detail::canUse32BitIndexMath(index)) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { auto sourceInfo = cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); auto indexInfo = cuda::detail::getTensorInfo<index_t, unsigned int>(index); indexInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // index to choose if (numIndex <= 16) { if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim); if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else 
if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] { cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_); int selfAddDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfAddDim); auto alpha_value = alpha.to<scalar_t>(); cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo = cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_); int sourceAddDim = sourceInfo.collapseDims(dim); sourceInfo.reduceDim(sourceAddDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () { cuda::detail::TensorInfo<index_t, uint64_t> indexInfo = cuda::detail::getTensorInfo<index_t, uint64_t>(index); indexInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); }); } return self; #undef SMALL_INDEX #undef LARGE_INDEX } namespace { // We prefer this kernel to avoid reloading index points if the number // of indices is a small number. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is large, then the // indexSelectLargeIndex kernel is a better choice to increase // parallelism. 
// Gather kernel for index_select, specialized for a SMALL number of indices.
// Each outer iteration handles one chosen index; a grid-stride inner loop
// copies the `innerSize` elements of the corresponding slice from `src`
// (selected along `srcSelectDim`) into `dst` (along `dstSelectDim`).
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // In order to avoid reloading the index that we are copying, load
  // it once to handle all of the points that are being selected, so
  // it can be reused as much as possible. This kernel is chosen when
  // this is a good choice (small number of chosen indices), since
  // re-accessing indices in addition to src elements can be slow.
  for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
    // The index value selecting which source slice feeds output slice dstIndex.
    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);

    // We stride over the output ignoring the indexed dimension
    // (innerSize), whose offset calculation is handled differently
    for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
         linearIndex < innerSize;
         linearIndex += gridDim.x * blockDim.x) {
      IndexType dstOffset =
        cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
      dstOffset += dstIndex * dst.strides[dstSelectDim];

      IndexType srcOffset =
        cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
      srcOffset += srcIndex * src.strides[srcSelectDim];

      dst.data[dstOffset] = src.data[srcOffset];
    }
  }
}

// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
// Gather kernel for index_select, specialized for a LARGE number of indices.
// Each grid-stride step maps one flat output position to a (slice, element)
// pair, so parallelism spans all (index, element) combinations at once.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                      cuda::detail::TensorInfo<T, IndexType> src,
                                      cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                      int dstSelectDim,
                                      int srcSelectDim,
                                      IndexType totalSize,
                                      IndexType innerSize,
                                      int64_t srcSelectDimSize) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType dstIndex, elementInSlice;
    if (IndexIsMajor) {
      // Index-major: adjacent threads walk elements within the same slice.
      dstIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    } else {
      // Element-major: adjacent threads touch the same element across slices.
      elementInSlice = linearIndex / innerSize;
      dstIndex = linearIndex % innerSize;
    }

    IndexType srcIndex =
      indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)];
    CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize);

    IndexType dstOffset =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstSelectDim];

    IndexType srcOffset =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcSelectDim];

    dst.data[dstOffset] = src.data[srcOffset];
  }
}

namespace {

// When using a 0-dim scalar tensor, we need the legacy (THC) semantics of
// TensorInfo: Pretend that the scalar tensor is in fact a one-element vector.
template <typename T, typename IndexType>
cuda::detail::TensorInfo<T, IndexType>
tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) {
  if (ti.dims == 0) {
    // Legacy (THC) semantics: present a scalar as a one-element vector.
    ti.dims = 1;
    ti.sizes[0] = 1;
    ti.strides[0] = 1;
  }
  return ti;
}

}

// Core of index_select: resizes `out` to hold `index.numel()` slices of
// `self` taken along `dim`, then launches the small-index or large-index
// gather kernel depending on the number of indices and collapsed dims.
template <typename scalar_t>
void index_select_out_cuda_impl(
    Tensor& out,
    const Tensor& self,
    long dim,
    const Tensor& index) {
  ptrdiff_t numIndices = index.numel();
  int selfDims = self.dim() == 0 ? 1 : self.dim();

  const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  TORCH_CHECK(
      index.dim() <= 1, "Index is supposed to be an empty tensor or a vector");
  TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds");

  // Output has self's shape except the indexed dimension becomes numIndices.
  std::vector<int64_t> newSize = self.sizes().vec();
  if (self.dim() > 0) {
    newSize[dim] = numIndices;
  }

  if (self.is_quantized()){
    out = at::empty_quantized(newSize, out);
  } else {
    at::native::resize_output(out, newSize);
  }

  ptrdiff_t outTotalSize = out.numel();
  if (outTotalSize == 0) {
    return;
  }

  bool indContig = index.is_contiguous();

  // The `self` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  int64_t selfSelectDimSize = self.dim() == 0 ? 1 : self.size(dim);
  ptrdiff_t sliceSize = outTotalSize / numIndices;

  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;

#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)          \
 hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream,        \
      outInfo, selfInfo, indicesInfo,                                \
      outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize),     \
      selfSelectDimSize);                                            \
  C10_HIP_KERNEL_LAUNCH_CHECK();

#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                 \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)         \
 hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,            \
                        DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>)    \
    , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream,        \
      outInfo, selfInfo, indicesInfo,                                \
      outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize),  \
      static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices),    \
      selfSelectDimSize);                                            \
  C10_HIP_KERNEL_LAUNCH_CHECK();

  dim3 smallIndexGrid(::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));

  dim3 largeIndexGrid(::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(outTotalSize, (ptrdiff_t)128));

  if (cuda::detail::canUse32BitIndexMath(out) &&
      cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);

    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);

    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index));
      indicesInfo.collapseDims();

      // A reasonable choice for when to have each thread iterate over
      // indices to choose
      if (numIndices <= 16) {
        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
        } else {
          SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
        }
      } else {
        bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim);

        if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) {
          LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
        } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
          }
        } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) {
          if (indexIsMajor) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
          }
        } else {
          LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
        }
      }
    });
  } else {
    // 64-bit indexing fallback: no specialized small/contiguous paths.
    auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out));
    int outSelectDim = outInfo.collapseDims(dim);
    outInfo.reduceDim(outSelectDim);

    auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self));
    int selfSelectDim = selfInfo.collapseDims(dim);
    selfInfo.reduceDim(selfSelectDim);

    AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () {
      auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index));
      indicesInfo.collapseDims();

      LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
    });
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}

} // anonymous
namespace

// Public entry point for index_select with an explicit output tensor.
// Validates devices/overlap/dims, then dispatches on dtype (including the
// per-tensor-affine quantized path) into index_select_out_cuda_impl.
Tensor& index_select_out_cuda(
    const Tensor& self,
    int64_t dim,
    const Tensor& index,
    Tensor& out) {
  static constexpr string_view DIM_WARNING =
    "Tensor too large or too many (> 25) dimensions";
  TORCH_CHECK(
    at::cuda::check_device({out, self, index}),
    "Input, output and indices must be on the current device");
  at::assert_no_internal_overlap(out);
  at::assert_no_overlap(out, self);
  at::assert_no_overlap(out, index);

  dim = at::maybe_wrap_dim(dim, self);
  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING);
  if (self.is_quantized()){
    TORCH_CHECK(
      self.qscheme() == kPerTensorAffine,
      "Only per_tensor quantized quantized tensors are supported by index_select.")
    AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] {
      index_select_out_cuda_impl<scalar_t>(out, self, dim, index);
    });
  } else {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
        at::ScalarType::Half,
        at::ScalarType::Bool,
        at::ScalarType::BFloat16,
        out.scalar_type(),
        "index_select_cuda",
        [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); });
  }

  return out;
}

// Functional (allocating) variant of index_select: creates an empty output
// with matching quantization/options and forwards to the out= overload.
Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) {
  Tensor out;
  if (self.is_quantized()){
    TORCH_CHECK(
      self.qscheme() == kPerTensorAffine,
      "Only per_tensor quantized quantized tensors are supported by index_select.")
    out = at::empty_quantized({0}, self);
  } else {
    out = at::empty({0}, self.options());
  }
  at::native::index_select_out_cuda(self, dim, index, out);
  return out;
}

namespace {

// Element-wise fill: where `mask` is true, write `value`; otherwise keep the
// existing element. Runs over a prebuilt TensorIterator via gpu_kernel.
template <typename mask_t>
void masked_fill_kernel(TensorIterator& iter, const Scalar& value) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() {
        const auto value_ = value.to<scalar_t>();
        gpu_kernel(
            iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t {
              if (mask) {
                return value_;
              }
              return self;
            });
      });
}

} // anonymous namespace

// In-place masked_fill_ with a Scalar value. Accepts bool masks (byte masks
// are deprecated but still handled).
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) {
  TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ",
    mask.device(), " and self on ", self.device());
  TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool,
    "expected mask dtype to be Bool but got ", mask.scalar_type());
  auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_");

  if (at::has_internal_overlap(self) == MemOverlap::YES) {
    TORCH_WARN(
      "Use of masked_fill_ on expanded tensors is deprecated. "
      "Please clone() the tensor before performing this operation. "
      "This also applies to advanced indexing e.g. tensor[mask] = scalar");
  }
  at::assert_no_partial_overlap(self, mask);

  c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_");

  // self is both output and first input; overlap checking is disabled because
  // the in-place aliasing is intentional.
  auto iter = TensorIteratorConfig()
      .set_check_mem_overlap(false)
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(self)
      .add_input(*b_mask)
      .build();

  if (b_mask->dtype() == at::ScalarType::Byte) {
    TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
            "please use a mask with dtype torch.bool instead.");
    masked_fill_kernel<uint8_t>(iter, value);
  } else {
    masked_fill_kernel<bool>(iter, value);
  }
  namedinference::propagate_names_if_nonempty(self, maybe_outnames);
  return self;
}

// Tensor-value overload: only 0-dim value tensors are allowed; delegates to
// the Scalar overload via .item().
Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) {
  TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor "
      "with ", value.dim(), " dimension(s).");
  return masked_fill__cuda(self, mask, value.item());
}

} // native
} // at
6052f590a57b1678a766d7ac14a8c53875cac841.cu
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/NativeFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/cub.h>
#include <c10/util/irange.h>
#include <c10/core/QScheme.h>
#include <limits>
#include <c10/macros/Macros.h>

namespace {

// Scatters (or accumulates) rows of grad_output into grad_weight using
// pre-sorted indices, so that all duplicates of an index are handled by a
// single warp (avoiding atomics on the destination rows).
template <typename scalar_t, int SZ>
__global__ void indexing_backward_kernel(
  int64_t* sorted_indices, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t numel, int64_t stride, int64_t stride_before, int64_t outer_dim, bool accumulate) {
//numel is total number of flattened indices, not expanded to dimensions that are not indexed.
//stride is the cumulative size of the not-indexed last dimensions
//stride_before is the stride of the dimension immediately preceding first indexed dimension
//if indexing starts from the 0th dimension, stride_before does not matter because blockIdx.z will be 0 in this case
//outer_dim is number of elements in the first unindexed dimensions
  using accscalar_t = at::acc_type<scalar_t, true>;

  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same destination index as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>

  // Number of values processed by each thread (grain size)
  for (int64_t z = blockIdx.z; z < outer_dim; z += gridDim.z){
    int64_t idx = blockIdx.x * blockDim.y + threadIdx.y;
    // Only the first warp of a run of equal indices does the work.
    if (idx < numel && (idx == 0 || sorted_indices[idx] != sorted_indices[idx - 1])){
      do {
        int64_t start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
        // if not accumulate, we only keep the last duplicate index so skip those before it
        if (!accumulate && (idx < numel - 1) && sorted_indices[idx] == sorted_indices[idx + 1]) {
          idx++;
          continue;
        }
        const int64_t weight_row = ((int64_t) sorted_indices[idx]) * stride + z * stride_before;
        const int64_t grad_row = ((int64_t) indices[idx]) * stride + z * numel * stride;
        const accscalar_t scale = (accscalar_t)1.0;

        accscalar_t gradient[SZ];
        accscalar_t weight[SZ];

        while (start_feature < stride) {
          // Load SZ elements per thread, accumulate, then store.
          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
              if (accumulate) {
                weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
              }
            }
          }

          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            if (accumulate) {
              weight[ii] += gradient[ii] * scale;
            } else {
              weight[ii] = gradient[ii] * scale;
            }
          }

          #pragma unroll
          for (int ii = 0; ii < SZ; ii++) {
            int64_t feature_dim = start_feature + ii * C10_WARP_SIZE;
            if (feature_dim < stride) {
              grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
            }
          }

          start_feature += gridDim.y * blockDim.x * SZ;
        }

        idx++;
      } while (idx < numel && sorted_indices[idx] == sorted_indices[idx - 1]);
    }
  }
}

}

namespace at { namespace native {

// Wraps negative indices into [0, dim_size); optionally validates the range.
static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size, bool check_range=true) {
//we don't need to check range in backward - if there were out of bounds indices forward should already have errored out
  if (index.numel() != 0 && check_range) {
    auto max_idx = index.max().item<int64_t>();
    auto min_idx = index.min().item<int64_t>();
    if (max_idx >= dim_size) {
      TORCH_CHECK_INDEX(false, "index ", max_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
    if (min_idx < -dim_size) {
      TORCH_CHECK_INDEX(false, "index ", min_idx, " is out of bounds for dimension ", dim, " with size ", dim_size);
    }
  }
  return index.remainder(dim_size);
}

static std::vector<int64_t> computeLinearStride(const Tensor & tensor) {
  // computes the stride as if tensor were contiguous
  auto sizes = tensor.sizes();
  std::vector<int64_t> stride(tensor.dim());
  stride[tensor.dim() - 1] = 1;
  std::partial_sum(sizes.rbegin(), sizes.rend() - 1, stride.rbegin() + 1, std::multiplies<int64_t>());
  return stride;
}

// Collapses the defined index tensors into a single flat linear index, and
// reports (nElemBefore, strideBefore, nElemAfter) for the unindexed dims.
static std::tuple<Tensor, int64_t, int64_t, int64_t> computeLinearIndex(const Tensor & src, TensorList indices, bool check_range) {
  auto strides = computeLinearStride(src);
  const auto& device = src.options().device();

  // Compute the linear index by multiplying the indexing tensors by the
  // stride and summing them. All the indexing tensors have the same shape at
  // this point. We also compute the number of dimensions before and after that
  // are not being index.
  Tensor linearIndex;
  int64_t emptyBefore = 0, emptyAfter = 0, nElemBefore = 1, nElemAfter = 1, strideBefore =0;
  for (const auto i: c10::irange(src.dim())) {
    if (indices[i].defined()) {
      // Cast index to the longType matching src's device
      // This allows us to support ie indexing a cuda tensor with a cpu tensor
      Tensor index = (wrapIndexOnce(indices[i], i, src.size(i), check_range) * strides[i]).to(device);
      if (linearIndex.defined()) {
        linearIndex += index;
      } else {
        linearIndex = index;
        if (i>0) {
           strideBefore = src.stride(i-1); // stride after undefined dimensions
        }
      }
    } else if (linearIndex.defined()) {
      emptyAfter++;
      nElemAfter *= src.size(i);
    } else {
      emptyBefore++;
      nElemBefore *= src.size(i);
    }
  }

  return std::make_tuple(std::move(linearIndex), nElemBefore, strideBefore, nElemAfter);
}

// Normalizes an advanced-indexing spec (masks expanded, indices broadcast,
// indexed dims made adjacent at the front) and produces the flat linear index.
static std::tuple<Tensor, Tensor, int64_t, int64_t, int64_t, std::vector<int64_t>> makeLinearIndex(Tensor self, const c10::List<c10::optional<at::Tensor>>& orig, bool check_range) {
  checkIndexTensorTypes(orig);
  // first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
  auto indices = expandTensors(self, orig);
  // next broadcast all index tensors together
  indices = expand_outplace(indices);
  // add missing null Tensors so that it matches self.dim()
  while (indices.size() < (size_t)self.dim()) {
    indices.emplace_back();
  }
  // if the non-null indices are not all adjacent, transpose self and indices
  // together so that they're adjacent at the front
  std::vector<int64_t> inversePerm;
  if (!hasContiguousSubspace(indices)) {
    std::tie(self, indices, inversePerm) = transposeToFrontAndInvPerm(self, indices);
  }
  int64_t nElemBefore, strideBefore, nElemAfter;
  Tensor linearIndex;
  std::tie(linearIndex, nElemBefore, strideBefore, nElemAfter) = computeLinearIndex(self, indices, check_range);
  return std::make_tuple(linearIndex, self, nElemBefore, strideBefore, nElemAfter, inversePerm);
}

void index_put_with_sort_kernel_thrust_helper(Tensor &linearIndex, Tensor &orig_indices, Tensor &sorted_indices, int64_t num_indices);

namespace {

// Largest flat offset reachable in `self`, used to bound the radix-sort bits.
int64_t largestIndex(const Tensor &self) {
  int64_t result = 0;
  for (const auto i: c10::irange(self.dim())) {
    result += (self.sizes()[i] - 1) * self.strides()[i];
  }
  return result;
}

// index_put_ implementation that sorts the flattened indices so duplicates
// are contiguous, then scatters `value` via indexing_backward_kernel.
void index_put_with_sort_kernel(Tensor & self, const c10::List<c10::optional<Tensor>>& indices, const Tensor & value, bool accumulate, bool unsafe) {
  if (indices.size() > (size_t)self.dim()) {
    TORCH_CHECK_INDEX(false, "too many indices for tensor of dimension ", self.dim(), " (got ", indices.size(), ")");
  }
  if (!self.is_contiguous()) {
    self = self.contiguous();
  }
  Tensor linearIndex, src, expandedValue = value;
  int64_t nElemBefore, strideBefore, sliceSize;
  std::vector<int64_t> inversePerm;
  std::tie(linearIndex, src, nElemBefore, strideBefore, sliceSize, inversePerm) = makeLinearIndex(self, indices, !unsafe);
  int64_t num_indices = linearIndex.numel();

  // Broadcast `value` up to the full number of scattered elements if needed.
  if (expandedValue.numel() < num_indices * nElemBefore * sliceSize) {
    auto expanded_size = at::DimVector(expandedValue.sizes());
    auto size1 = expandedValue.sizes();
    auto size2 = linearIndex.sizes();
    if (are_expandable(size1, size2)) {
      expanded_size = infer_size_dimvector(size1, size2);
    }
    if (nElemBefore > 1) {
      expanded_size.insert(expanded_size.begin(), nElemBefore);
    }
    expandedValue = expandedValue.expand(expanded_size);
  }
  expandedValue = expandedValue.contiguous();

  if (num_indices > 0 && sliceSize > 0) {
      const bool permuted = !src.is_contiguous();
      auto src_ = permuted ? src.contiguous() : src;
      linearIndex = linearIndex.reshape(-1);
      auto sorted_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      auto orig_indices = at::empty_like(linearIndex, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

      // Convert flat element offsets into slice numbers before sorting.
      linearIndex.divide_(sliceSize, "trunc");

      // cub on CUDA <= 11.2 have a bug that for small sizes
      // cub's sort can be much slower than thrust's merge sort
      // this bug is fixed in CUDA 11.3
#if (defined(CUDA_VERSION) && CUDA_VERSION < 11030) || defined(USE_ROCM)
      if (num_indices < 50000) {
        index_put_with_sort_kernel_thrust_helper(linearIndex, orig_indices, sorted_indices, num_indices);
      } else
#endif
      {
      // Sort the inputs into sorted with the corresponding indices
      auto range = at::arange(num_indices, linearIndex.options());
      // linearIndex can not be negative, and we take advantage of this
      // fact to sort on less bits for better performance.
      int64_t nbits = cuda::cub::get_num_bits(largestIndex(self) / sliceSize);
      cuda::cub::radix_sort_pairs(
        linearIndex.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(),
        range.data_ptr<int64_t>(), orig_indices.data_ptr<int64_t>(),
        num_indices, false, 0, nbits);
      }

      TORCH_INTERNAL_ASSERT(
          linearIndex.numel()*sliceSize*nElemBefore == expandedValue.numel(),
          "number of flattened indices did not match number of elements in the value tensor: ",
          linearIndex.numel()*sliceSize*nElemBefore, " vs ", expandedValue.numel());
      const int UNROLL = 4;
      const int indices_per_block = 4;
      dim3 grid(ceil_div(num_indices, (int64_t) indices_per_block),
           std::min<int>(at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(sliceSize, (int64_t) (C10_WARP_SIZE*UNROLL))),
           std::min(std::max<int>(1,nElemBefore), at::cuda::getCurrentDeviceProperties()->maxGridSize[2]));
      dim3 block(C10_WARP_SIZE, indices_per_block);

      AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
      expandedValue.scalar_type(), "indexing_backward", [&] {
        indexing_backward_kernel<scalar_t, UNROLL><<<grid, block, 0, stream>>>(
          sorted_indices.data_ptr<int64_t>(),
          orig_indices.data_ptr<int64_t>(),
          expandedValue.data_ptr<scalar_t>(),
          src_.data_ptr<scalar_t>(),
          num_indices,
          sliceSize,
          strideBefore,
          nElemBefore,
          accumulate);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });

      if (permuted) {
        self.copy_(src_.permute(inversePerm));
      }
  }
}

REGISTER_CUDA_DISPATCH(index_put_with_sort_stub, &index_put_with_sort_kernel);
} //anonymous

// Check tensor dimensions for index operations, and return the slice size.
static ptrdiff_t getSliceSize(const Tensor & dst,
                              int dim,
                              const Tensor & index,
                              const Tensor & src)
{
  const auto dstDims = dst.dim();
  const auto srcDims = src.dim();

  TORCH_CHECK(index.dim() <= 1, "Index must be vector or scalar");

  ptrdiff_t dstSliceSize = 1;
  TORCH_CHECK(dim >= 0 && dim < dstDims, "Indexing dim ", dim, " is out of bounds");
  for (const auto d: c10::irange(dstDims)) {
    if (d != dim) {
      dstSliceSize *= dst.size(d);
    }
  }

  TORCH_CHECK(dim < srcDims, "Indexing dim ", dim, " is out of bounds");
  TORCH_CHECK(index.numel() == src.size(dim),
             "length of src.size[dim] is not equal to length of indices");

  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;

  if (dstDims != srcDims) mismatch = true;

  for (const auto d: c10::irange(srcDims)) {
    if (d != dim) {
      srcSliceSize *= src.size(d);
      if (!mismatch && dst.size(d) != src.size(d)) mismatch = true;
    }
  }

  // FIX: the message previously used printf-style "%ld vs %ld" placeholders,
  // but TORCH_CHECK streams its extra arguments (it does not printf-format),
  // so the literal "%ld" text was printed and the values appended after it.
  TORCH_CHECK(dstSliceSize == srcSliceSize,
             "Source/destination tensor have different slice sizes (",
             dstSliceSize, " vs ", srcSliceSize, ")");

  if (mismatch) {
    TORCH_WARN_ONCE(
        "Warning: source/destination slices have same size but different "
        "shape for an index operation. This behavior is deprecated.\n");
  }

  return dstSliceSize;
}

// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // Every thread walks the whole index list, but each index value is loaded
  // only once per thread and reused for all of that thread's elements; that
  // is what makes this variant cheap when the index list is short.
  const IndexType firstElem = blockIdx.x * blockDim.x + threadIdx.x;
  const IndexType elemStep = gridDim.x * blockDim.x;

  for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
    // Which destination slice this source slice accumulates into.
    const IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);

    // Grid-stride over the slice interior; atomics are needed because several
    // indices may point at the same destination slice.
    for (IndexType elem = firstElem; elem < innerSize; elem += elemStep) {
      IndexType dstOffset =
          cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elem, dst)
          + dstIndex * dst.strides[dstAddDim];
      IndexType srcOffset =
          cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elem, src)
          + srcIndex * src.strides[srcAddDim];
      gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
    }
  }
}

// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim,
          bool IndexIsMajor>
__global__ void indexAddLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst,
                                   cuda::detail::TensorInfo<T, IndexType> src,
                                   cuda::detail::TensorInfo<IndicesType, IndexType> indices,
                                   int dstAddDim,
                                   int srcAddDim,
                                   IndexType totalSize,
                                   IndexType innerSize,
                                   int64_t dstAddDimSize,
                                   T alpha) {
  // We stride over the output including the indexed dimension
  // (totalSize), and calculate the destination index point based on that
  for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
       linearIndex < totalSize;
       linearIndex += gridDim.x * blockDim.x) {
    IndexType srcIndex, elementInSlice;
    if (IndexIsMajor) {
      // Index-major: adjacent threads walk elements within the same slice.
      srcIndex = linearIndex / innerSize;
      elementInSlice = linearIndex % innerSize;
    } else {
      // Element-major: adjacent threads touch the same element across slices.
      elementInSlice = linearIndex / innerSize;
      srcIndex = linearIndex % innerSize;
    }

    // Lua indices begin at 1
    IndexType dstIndex =
        indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(srcIndex, indices)];
    CUDA_KERNEL_ASSERT(dstIndex < dstAddDimSize);

    IndexType dstOffset =
      cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
    dstOffset += dstIndex * dst.strides[dstAddDim];

    IndexType srcOffset =
      cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
    srcOffset += srcIndex * src.strides[srcAddDim];

    // Atomic because multiple indices may accumulate into the same slice.
    gpuAtomicAddNoReturn(&dst.data[dstOffset], src.data[srcOffset] * alpha);
  }
}

// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
//   sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
//   == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
//   false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
//   (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
template <typename scalar_t>
bool indexShouldBeMajor(cuda::detail::TensorInfo<scalar_t, unsigned int> &info,
                        int sliceDim)
{
  // The stride between adjacent slices (e.g., between element #0 of slice #100
  // and element #0 of slice #101).
  unsigned int sliceStride = info.strides[sliceDim];

  for (const auto i: c10::irange(info.dims)) {
    if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
      return true;
    }
  }

  return false;
}

// In-place index_add_: self[index[i], ...] += alpha * source[i, ...] along
// `dim`. Validates shapes/dtypes/overlap, falls back to a deterministic
// index_put_ when deterministic algorithms are requested, and otherwise
// dispatches to the small- or large-index atomic-add kernel.
Tensor& index_add_cuda_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source, const Scalar &alpha) {
  dim = maybe_wrap_dim(dim, self.dim());

  TensorArg self_arg{self, "self", 1}, index_arg{index, "index", 3}, source_arg{source, "source", 4};
  checkAllSameGPU(__func__, {self_arg, index_arg, source_arg});

  TORCH_CHECK_INDEX(index.dim() <= 1, "index_add_(): Index is supposed to be a vector");
  TORCH_CHECK(index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int,
          "index_add_(): Expected dtype int32/int64 for index");
  TORCH_CHECK(self.scalar_type() == source.scalar_type(),
              "index_add_(): self and source must have the same scalar type");
  TORCH_CHECK(dim == 0 || dim < source.dim(),
              "index_add_(): Indexing dim ", dim, " is out of bounds of tensor");
  TORCH_CHECK(index.numel() == (source.dim() == 0 ? 1 : source.size(dim)),
              "index_add_(): Number of indices should be equal to self.size(dim)");

  at::assert_no_internal_overlap(self);
  at::assert_no_overlap(self, index);
  at::assert_no_overlap(self, source);

  // Scalars are treated as 1-d tensor
  Tensor self_ = (self.dim() == 0) ? self.view(1) : self;
  Tensor source_ = (source.dim() == 0) ? source.view(1) : source;

  TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");
  TORCH_CHECK(source.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims" );
  TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, "tensor has too many (>", MAX_TENSORINFO_DIMS, ") dims");

  at::assert_no_internal_overlap(self);
  at::assert_no_partial_overlap(self, index);
  at::assert_no_partial_overlap(self, source);

  // Deterministic path: express index_add_ as an accumulating index_put_
  // (atomics in the kernels below are nondeterministic for float types).
  if (globalContext().deterministicAlgorithms()){
    torch::List<c10::optional<Tensor>> indices;
    indices.reserve(dim + 1);
    for (const auto i: c10::irange(dim)) {
      indices.emplace_back();
    }
    indices.emplace_back(index.to(at::kLong));
    return self.index_put_(indices, source * alpha, true);
  }

  // The `source` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of index we are choosing, which is the total size
  // of the tensor `index`.
  ptrdiff_t sliceSize = getSliceSize(self_, dim, index, source_);
  ptrdiff_t sourceTotalSize = source.numel();
  int64_t selfAddDimSize = self_.size(dim);
  ptrdiff_t numIndex = index.numel();

  if (sliceSize == 0) {
    return self;
  }
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  bool indContig = index.is_contiguous();
  int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;

#define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM)   \
  indexAddSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, SELF_DIM, SOURCE_DIM, IDX_DIM>  \
    <<<smallIndexGrid, smallIndexBlock, 0, stream>>>(                                 \
      selfInfo, sourceInfo, indexInfo,                                                \
      selfAddDim, sourceAddDim, sliceSize, selfAddDimSize, alpha_value);              \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

#define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE,                        \
                    SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR)            \
  indexAddLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE,                       \
                     SELF_DIM, SOURCE_DIM, IDX_DIM, IDX_IS_MAJOR>           \
    <<<largeIndexGrid, largeIndexBlock, 0, stream>>>(                       \
      selfInfo, sourceInfo, indexInfo,                                      \
      selfAddDim, sourceAddDim, sourceTotalSize,                            \
      (IDX_IS_MAJOR) ? sliceSize : numIndex,                                \
      selfAddDimSize, alpha_value);                                         \
  C10_CUDA_KERNEL_LAUNCH_CHECK();

  dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));

  dim3 largeIndexGrid(std::min(ceil_div(sourceTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(std::min(sourceTotalSize, (ptrdiff_t)128));

  if (cuda::detail::canUse32BitIndexMath(self) &&
      cuda::detail::canUse32BitIndexMath(source) &&
      cuda::detail::canUse32BitIndexMath(index)) {
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, unsigned int> selfInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();
      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        auto sourceInfo =
          cuda::detail::getTensorInfo<scalar_t, unsigned int>(source_);
        int sourceAddDim = sourceInfo.collapseDims(dim);
        sourceInfo.reduceDim(sourceAddDim);

        auto indexInfo =
          cuda::detail::getTensorInfo<index_t, unsigned int>(index);
        indexInfo.collapseDims();

        // A reasonable choice for when to have each thread iterate over
        // index to choose
        if (numIndex <= 16) {
          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2);
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2);
          } else {
            SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1);
          }
        } else {
          bool indexIsMajor = indexShouldBeMajor(selfInfo, selfAddDim);

          if (selfInfo.dims == 1 && sourceInfo.dims == 1 && indContig) {
            LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true);
          } else if (selfInfo.dims == 2 && sourceInfo.dims == 2 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false);
            }
          } else if (selfInfo.dims == 3 && sourceInfo.dims == 3 && indContig) {
            if (indexIsMajor) {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true);
            } else {
              LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false);
            }
          } else {
            LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true);
          }
        }
      });
    });
  } else {
    // 64-bit indexing fallback: no specialized small/contiguous paths.
    AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "index_add", [&] {
      cuda::detail::TensorInfo<scalar_t, uint64_t> selfInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(self_);
      int selfAddDim = selfInfo.collapseDims(dim);
      selfInfo.reduceDim(selfAddDim);
      auto alpha_value = alpha.to<scalar_t>();

      cuda::detail::TensorInfo<scalar_t, uint64_t> sourceInfo =
        cuda::detail::getTensorInfo<scalar_t, uint64_t>(source_);
      int sourceAddDim = sourceInfo.collapseDims(dim);
      sourceInfo.reduceDim(sourceAddDim);

      AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_add_cuda_", [&] () {
        cuda::detail::TensorInfo<index_t, uint64_t> indexInfo =
          cuda::detail::getTensorInfo<index_t, uint64_t>(index);
        indexInfo.collapseDims();

        LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true);
      });
    });
  }

  return self;
#undef SMALL_INDEX
#undef LARGE_INDEX
}

namespace {

// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim> __global__ void indexSelectSmallIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType innerSize, int64_t srcSelectDimSize) { // In order to avoid reloading the index that we are copying, load // it once to handle all of the points that are being selected, so // it can be reused as much as possible. This kernel is chosen when // this is a good choice (small number of chosen indices), since // re-accessing indices in addition to src elements can be slow. for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) { IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); // We stride over the output ignoring the indexed dimension // (innerSize), whose offset calculation is handled differently for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < innerSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } } // We prefer this kernel to balance parallelism across index points, // if there are a large number of indices. // This kernel in fact works for all choices of problem size, but if // the number of indices chosen is small, then the // indexSelectSmallIndex kernel is a better choice to reduce memory // accesses. 
template <typename T, typename IndicesType, typename IndexType, int DstDim, int SrcDim, int IdxDim, bool IndexIsMajor> __global__ void indexSelectLargeIndex(cuda::detail::TensorInfo<T, IndexType> dst, cuda::detail::TensorInfo<T, IndexType> src, cuda::detail::TensorInfo<IndicesType, IndexType> indices, int dstSelectDim, int srcSelectDim, IndexType totalSize, IndexType innerSize, int64_t srcSelectDimSize) { // We stride over the output including the indexed dimension // (totalSize), and calculate the destination index point based on that for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalSize; linearIndex += gridDim.x * blockDim.x) { IndexType dstIndex, elementInSlice; if (IndexIsMajor) { dstIndex = linearIndex / innerSize; elementInSlice = linearIndex % innerSize; } else { elementInSlice = linearIndex / innerSize; dstIndex = linearIndex % innerSize; } IndexType srcIndex = indices.data[cuda::detail::IndexToOffset<IndicesType, IndexType, IdxDim>::get(dstIndex, indices)]; CUDA_KERNEL_ASSERT(srcIndex < srcSelectDimSize); IndexType dstOffset = cuda::detail::IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst); dstOffset += dstIndex * dst.strides[dstSelectDim]; IndexType srcOffset = cuda::detail::IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src); srcOffset += srcIndex * src.strides[srcSelectDim]; dst.data[dstOffset] = src.data[srcOffset]; } } namespace { // When using a 0-dim scalar tensor, we need the legacy (THC) semantics of // TensorInfo: Pretend that the scalar tensor is in fact a one-element vector. 
template <typename T, typename IndexType> cuda::detail::TensorInfo<T, IndexType> tensorInfoLegacyIfScalar(cuda::detail::TensorInfo<T, IndexType> ti) { if (ti.dims == 0) { ti.dims = 1; ti.sizes[0] = 1; ti.strides[0] = 1; } return ti; } } template <typename scalar_t> void index_select_out_cuda_impl( Tensor& out, const Tensor& self, long dim, const Tensor& index) { ptrdiff_t numIndices = index.numel(); int selfDims = self.dim() == 0 ? 1 : self.dim(); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK( index.dim() <= 1, "Index is supposed to be an empty tensor or a vector"); TORCH_CHECK(dim < selfDims, "Indexing dim is out of bounds"); std::vector<int64_t> newSize = self.sizes().vec(); if (self.dim() > 0) { newSize[dim] = numIndices; } if (self.is_quantized()){ out = at::empty_quantized(newSize, out); } else { at::native::resize_output(out, newSize); } ptrdiff_t outTotalSize = out.numel(); if (outTotalSize == 0) { return; } bool indContig = index.is_contiguous(); // The `self` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t selfSelectDimSize = self.dim() == 0 ? 
1 : self.size(dim); ptrdiff_t sliceSize = outTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(sliceSize), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define LARGE_INDEX(TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexSelectLargeIndex<TENSOR_TYPE, INDICES_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ outInfo, selfInfo, indicesInfo, \ outSelectDim, selfSelectDim, static_cast<TYPE>(outTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? sliceSize : numIndices), \ selfSelectDimSize); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); dim3 smallIndexGrid(std::min(ceil_div(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(ceil_div(outTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(outTotalSize, (ptrdiff_t)128)); if (cuda::detail::canUse32BitIndexMath(out) && cuda::detail::canUse32BitIndexMath(self) && cuda::detail::canUse32BitIndexMath(index)) { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, unsigned int>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, unsigned int>(index)); indicesInfo.collapseDims(); // A 
reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2); } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = indexShouldBeMajor(outInfo, outSelectDim); if (outInfo.dims == 1 && selfInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, index_t, unsigned int, 1, 1, -2, true); } else if (outInfo.dims == 2 && selfInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 2, 2, -2, false); } } else if (outInfo.dims == 3 && selfInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, index_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, index_t, unsigned int, -1, -1, -1, true); } } }); } else { auto outInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(out)); int outSelectDim = outInfo.collapseDims(dim); outInfo.reduceDim(outSelectDim); auto selfInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<scalar_t, uint64_t>(self)); int selfSelectDim = selfInfo.collapseDims(dim); selfInfo.reduceDim(selfSelectDim); AT_DISPATCH_INDEX_TYPES(index.scalar_type(), "index_select_out_cuda_impl", [&] () { auto indicesInfo = tensorInfoLegacyIfScalar(cuda::detail::getTensorInfo<index_t, uint64_t>(index)); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, index_t, uint64_t, -1, -1, -1, true); }); } #undef SMALL_INDEX #undef LARGE_INDEX } } // anonymous namespace Tensor& index_select_out_cuda( const 
Tensor& self, int64_t dim, const Tensor& index, Tensor& out) { static constexpr string_view DIM_WARNING = "Tensor too large or too many (> 25) dimensions"; TORCH_CHECK( at::cuda::check_device({out, self, index}), "Input, output and indices must be on the current device"); at::assert_no_internal_overlap(out); at::assert_no_overlap(out, self); at::assert_no_overlap(out, index); dim = at::maybe_wrap_dim(dim, self); TORCH_CHECK(self.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); TORCH_CHECK(index.dim() <= MAX_TENSORINFO_DIMS, DIM_WARNING); if (self.is_quantized()){ TORCH_CHECK( self.qscheme() == kPerTensorAffine, "Only per_tensor quantized quantized tensors are supported by index_select.") AT_DISPATCH_QINT_TYPES(out.scalar_type(), "index_select_quant_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, out.scalar_type(), "index_select_cuda", [&] { index_select_out_cuda_impl<scalar_t>(out, self, dim, index); }); } return out; } Tensor index_select_cuda(const Tensor& self, int64_t dim, const Tensor& index) { Tensor out; if (self.is_quantized()){ TORCH_CHECK( self.qscheme() == kPerTensorAffine, "Only per_tensor quantized quantized tensors are supported by index_select.") out = at::empty_quantized({0}, self); } else { out = at::empty({0}, self.options()); } at::native::index_select_out_cuda(self, dim, index, out); return out; } namespace { template <typename mask_t> void masked_fill_kernel(TensorIterator& iter, const Scalar& value) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( kBool, kHalf, kBFloat16, iter.common_dtype(), "masked_fill_", [&]() { const auto value_ = value.to<scalar_t>(); gpu_kernel( iter, [value_] GPU_LAMBDA(scalar_t self, mask_t mask) -> scalar_t { if (mask) { return value_; } return self; }); }); } } // anonymous namespace Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Scalar& value) { 
TORCH_CHECK(self.device() == mask.device(), "expected self and mask to be on the same device, but got mask on ", mask.device(), " and self on ", self.device()); TORCH_CHECK(mask.scalar_type() == kByte || mask.scalar_type() == kBool, "expected mask dtype to be Bool but got ", mask.scalar_type()); auto maybe_outnames = namedinference::broadcast_to_outnames(self, mask, "masked_fill_"); if (at::has_internal_overlap(self) == MemOverlap::YES) { TORCH_WARN( "Use of masked_fill_ on expanded tensors is deprecated. " "Please clone() the tensor before performing this operation. " "This also applies to advanced indexing e.g. tensor[mask] = scalar"); } at::assert_no_partial_overlap(self, mask); c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_fill_"); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(*b_mask) .build(); if (b_mask->dtype() == at::ScalarType::Byte) { TORCH_WARN("masked_fill_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); masked_fill_kernel<uint8_t>(iter, value); } else { masked_fill_kernel<bool>(iter, value); } namedinference::propagate_names_if_nonempty(self, maybe_outnames); return self; } Tensor & masked_fill__cuda(Tensor& self, const Tensor & mask, const Tensor & value) { TORCH_CHECK(value.dim() == 0, "masked_fill_ only supports a 0-dimensional value tensor, but got tensor " "with ", value.dim(), " dimension(s)."); return masked_fill__cuda(self, mask, value.item()); } } // native } // at
6b74f24b6523d2fc32b40be164920606460fd06b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include"pathalg.h" static const int WORK_SIZE =258; __global__ void BFShigh(int t,int *m,int *st,int *te,int *d,int *chan,int round,int edgesize,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=edgesize)return; int from=st[i]; if (chan[from]<0)return; chan[from]=-1; int to=te[i]; d[to]=round; if((to%nodenum)/(WD+1)==t)*m=1; } __global__ void initchan(int s,int *chan,int *d,int *pred,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=nodenum*LY)return; int bi=i%nodenum; int W=WD+1; chan[i]=(bi/W==s)?1:-1; d[i]=(bi/W==s)?0:inf; pred[i]=d[i]; } __global__ void chanchan(int *m,int *pred,int *d,int *chan,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=nodenum*LY)return; chan[i]=-1; if(d[i]<pred[i]) { chan[i]=1; pred[i]=d[i]; } } void parallelor::copydata(int s,vector<edge>&edges,int nodenum){ memset(pre,-1,sizeof(int)*nodenum); *m=0; for(int i=0;i<nodenum;i++) d[i]=INT_MAX/2; d[s]=0; for(int i=0;i<edges.size();i++) aedges[i]=edges[i]; hipMemcpy(dev_edges,aedges,edges.size()* sizeof(edge),hipMemcpyHostToDevice); hipMemcpy(dev_m,m,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_d,d,sizeof(int)*nodenum,hipMemcpyHostToDevice); hipMemcpy(dev_pre,pre,sizeof(int)*nodenum,hipMemcpyHostToDevice); }; void parallelor::dellocate(){ /*delete[]d; delete[]pre; delete[]aedges; delete m; hipFree(dev_edges); hipFree(dev_m); hipFree(dev_d); hipFree(dev_pre);*/ }; void parallelor::allocate(int maxn,int maxedge){ m=new int; d=new int[maxn],pre=new int[maxn]; aedges=new edge[maxedge]; hipMalloc(&dev_edges, sizeof(edge)*maxedge); hipMalloc((void**)&dev_d,maxn*sizeof(int)); hipMalloc((void**)&dev_pre,maxn*sizeof(int)); hipMemcpy(duan,dev_duan,duansize*sizeof(int),hipMemcpyDeviceToHost); hipMalloc((void**)&dev_m,sizeof(int)); } bool parallelor::cutcake(int index){ cout<<"cut "<<index<<endl; if(maxbw-(index+1)*10>=0) 
maxbw-=(index+1)*10; else { cout<<"failure"<<endl; return false; } hleveln[index]++; return true; }; void parallelor::topsort() { cout<<" in top sort "<<endl; queue<int>zero; vector<int>order(nodenum*LY,-1); for(int i=0;i<nodenum*LY;i++) zero.push(i); int biao=0; while(!zero.empty()) { int node=zero.front(); zero.pop(); order[node]=biao++; for(int i=0;i<neibn[node].size();i++) { if((--ancestor[neibn[node][i]])==0) zero.push(neibn[node][i]); } } vector<pair<int,int>>tmp; for(int i=0;i<order.size();i++) tmp.push_back(make_pair(i,order[i])); //sort(tmp.begin(),tmp.end(),pairless()); for(int i=0;i<order.size();i++) ordernode.push_back(tmp[i].first); }; void parallelor::init(vector<edge>&extenedges,vector<vector<int>>&relate,ginfo ginf){ cout<<"in cuda init"<<endl; nodenum=ginf.enodesize; edges=extenedges; mark=new int; *mark=0; W=WD+1; int *d,*dev_d,*pred,*dev_pred; st=new int[2*WD*edges.size()*LY]; te=new int[2*WD*edges.size()*LY]; chan=new int[nodenum*LY]; d=new int[nodenum*LY]; pred=new int[nodenum*LY]; vector<vector<int>>nein(nodenum*LY,vector<int>()); vector<int>as(nodenum*LY,0); ancestor=as; neibn=nein; cout<<"gsdfs"<<endl; for(int k=0;k<LY;k++) { int startn=k*nodenum; for(int i=0;i<edges.size();i++) for(int j=0;j<W-1;j++) { int s=edges[i].s*W+j+startn; int t=edges[i].t*W+j+1+startn; ancestor[t]++; neibn[s].push_back(t); neibn[t].push_back(s); } } cout<<"before sort "<<endl; topsort(); int count=0; cout<<"sort out "<<endl; for(int i=0;i<nodenum*LY;i++) for(int j=0;j<neibn[ordernode[i]].size();j++) { st[count]=ordernode[i]; te[count]=neibn[ordernode[i]][j]; count++; } cout<<"asdasd"<<endl; for(int i=0;i<nodenum*LY;i++) { chan[i]=-1; d[i]=INT_MAX/2; pred[i]=d[i]; } cout<<"hrerr"<<endl; hipMalloc((void**)&dev_chan,nodenum*LY*sizeof(int)); hipMalloc((void**)&dev_st,LY*WD*edges.size()*sizeof(int)); hipMalloc((void**)&dev_te,LY*WD*edges.size()*sizeof(int)); hipMalloc((void**)&dev_d,LY*nodenum*sizeof(int)); hipMalloc((void**)&dev_pred,LY*nodenum*sizeof(int)); 
hipMalloc((void**)&dev_mark,sizeof(int)); hipMemcpy(dev_chan,chan,nodenum*LY*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_te,te,LY*WD*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_st,st,LY*WD*edges.size()*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_d,d,LY*nodenum*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_pred,pred,LY*nodenum*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_mark,mark,sizeof(int),hipMemcpyHostToDevice); cout<<"get out"<<endl; }; void parallelor::initprepush(vector<edge>&extenedges,vector<vector<int>>&relate,ginfo ginf){ cout<<"in cuda init"<<endl; maxbw=500; //allocate in cuda nodenum=ginf.enodesize; edges=extenedges; cout<<"out cuda init"<<endl; } parallelor::parallelor() { }; vector<int> parallelor:: routalg(int s,int t,int bw) { cout<<"blasting "<<endl; int E=2*edges.size()*WD*LY; int kk=1; for(int i=0;i<1;i++) { *mark=0; initchan<< <(nodenum*LY/WORK_SIZE)+1, WORK_SIZE >> >(s,dev_chan,dev_d,dev_pred,nodenum); hipMemcpy(dev_m,&mark, sizeof(int), hipMemcpyHostToDevice); do{ hipMemcpy(chan,dev_chan,nodenum*sizeof(int), hipMemcpyDeviceToHost); int cc=0; BFShigh << <(E/WORK_SIZE)+1, WORK_SIZE >> >(t,dev_m,dev_st,dev_te,dev_d,dev_chan,kk,E,nodenum); chanchan<< <(nodenum*LY/WORK_SIZE)+1, WORK_SIZE >> >(dev_m,dev_pred,dev_d,dev_chan,nodenum); hipMemcpy(mark, dev_m, sizeof(int), hipMemcpyDeviceToHost); kk++; } while(*mark==0); cout<<"out here is !"<<endl; //cout<<"kk is: "<<kk<<endl; } cout<<"out routalg"<<endl; return vector<int>(); }; int fls(int x) { int position; int i; if(x!=0) for(i=(x>>1),position=0;i!=0;++position) i>>=1; else position=-1; return pow(2,position+1); }
6b74f24b6523d2fc32b40be164920606460fd06b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include"pathalg.h" static const int WORK_SIZE =258; __global__ void BFShigh(int t,int *m,int *st,int *te,int *d,int *chan,int round,int edgesize,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=edgesize)return; int from=st[i]; if (chan[from]<0)return; chan[from]=-1; int to=te[i]; d[to]=round; if((to%nodenum)/(WD+1)==t)*m=1; } __global__ void initchan(int s,int *chan,int *d,int *pred,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=nodenum*LY)return; int bi=i%nodenum; int W=WD+1; chan[i]=(bi/W==s)?1:-1; d[i]=(bi/W==s)?0:inf; pred[i]=d[i]; } __global__ void chanchan(int *m,int *pred,int *d,int *chan,int nodenum) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i>=nodenum*LY)return; chan[i]=-1; if(d[i]<pred[i]) { chan[i]=1; pred[i]=d[i]; } } void parallelor::copydata(int s,vector<edge>&edges,int nodenum){ memset(pre,-1,sizeof(int)*nodenum); *m=0; for(int i=0;i<nodenum;i++) d[i]=INT_MAX/2; d[s]=0; for(int i=0;i<edges.size();i++) aedges[i]=edges[i]; cudaMemcpy(dev_edges,aedges,edges.size()* sizeof(edge),cudaMemcpyHostToDevice); cudaMemcpy(dev_m,m,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_d,d,sizeof(int)*nodenum,cudaMemcpyHostToDevice); cudaMemcpy(dev_pre,pre,sizeof(int)*nodenum,cudaMemcpyHostToDevice); }; void parallelor::dellocate(){ /*delete[]d; delete[]pre; delete[]aedges; delete m; cudaFree(dev_edges); cudaFree(dev_m); cudaFree(dev_d); cudaFree(dev_pre);*/ }; void parallelor::allocate(int maxn,int maxedge){ m=new int; d=new int[maxn],pre=new int[maxn]; aedges=new edge[maxedge]; cudaMalloc(&dev_edges, sizeof(edge)*maxedge); cudaMalloc((void**)&dev_d,maxn*sizeof(int)); cudaMalloc((void**)&dev_pre,maxn*sizeof(int)); cudaMemcpy(duan,dev_duan,duansize*sizeof(int),cudaMemcpyDeviceToHost); cudaMalloc((void**)&dev_m,sizeof(int)); } bool parallelor::cutcake(int index){ cout<<"cut "<<index<<endl; if(maxbw-(index+1)*10>=0) maxbw-=(index+1)*10; else { cout<<"failure"<<endl; 
return false; } hleveln[index]++; return true; }; void parallelor::topsort() { cout<<" in top sort "<<endl; queue<int>zero; vector<int>order(nodenum*LY,-1); for(int i=0;i<nodenum*LY;i++) zero.push(i); int biao=0; while(!zero.empty()) { int node=zero.front(); zero.pop(); order[node]=biao++; for(int i=0;i<neibn[node].size();i++) { if((--ancestor[neibn[node][i]])==0) zero.push(neibn[node][i]); } } vector<pair<int,int>>tmp; for(int i=0;i<order.size();i++) tmp.push_back(make_pair(i,order[i])); //sort(tmp.begin(),tmp.end(),pairless()); for(int i=0;i<order.size();i++) ordernode.push_back(tmp[i].first); }; void parallelor::init(vector<edge>&extenedges,vector<vector<int>>&relate,ginfo ginf){ cout<<"in cuda init"<<endl; nodenum=ginf.enodesize; edges=extenedges; mark=new int; *mark=0; W=WD+1; int *d,*dev_d,*pred,*dev_pred; st=new int[2*WD*edges.size()*LY]; te=new int[2*WD*edges.size()*LY]; chan=new int[nodenum*LY]; d=new int[nodenum*LY]; pred=new int[nodenum*LY]; vector<vector<int>>nein(nodenum*LY,vector<int>()); vector<int>as(nodenum*LY,0); ancestor=as; neibn=nein; cout<<"gsdfs"<<endl; for(int k=0;k<LY;k++) { int startn=k*nodenum; for(int i=0;i<edges.size();i++) for(int j=0;j<W-1;j++) { int s=edges[i].s*W+j+startn; int t=edges[i].t*W+j+1+startn; ancestor[t]++; neibn[s].push_back(t); neibn[t].push_back(s); } } cout<<"before sort "<<endl; topsort(); int count=0; cout<<"sort out "<<endl; for(int i=0;i<nodenum*LY;i++) for(int j=0;j<neibn[ordernode[i]].size();j++) { st[count]=ordernode[i]; te[count]=neibn[ordernode[i]][j]; count++; } cout<<"asdasd"<<endl; for(int i=0;i<nodenum*LY;i++) { chan[i]=-1; d[i]=INT_MAX/2; pred[i]=d[i]; } cout<<"hrerr"<<endl; cudaMalloc((void**)&dev_chan,nodenum*LY*sizeof(int)); cudaMalloc((void**)&dev_st,LY*WD*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_te,LY*WD*edges.size()*sizeof(int)); cudaMalloc((void**)&dev_d,LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_pred,LY*nodenum*sizeof(int)); cudaMalloc((void**)&dev_mark,sizeof(int)); 
cudaMemcpy(dev_chan,chan,nodenum*LY*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_te,te,LY*WD*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_st,st,LY*WD*edges.size()*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_d,d,LY*nodenum*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_pred,pred,LY*nodenum*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_mark,mark,sizeof(int),cudaMemcpyHostToDevice); cout<<"get out"<<endl; }; void parallelor::initprepush(vector<edge>&extenedges,vector<vector<int>>&relate,ginfo ginf){ cout<<"in cuda init"<<endl; maxbw=500; //allocate in cuda nodenum=ginf.enodesize; edges=extenedges; cout<<"out cuda init"<<endl; } parallelor::parallelor() { }; vector<int> parallelor:: routalg(int s,int t,int bw) { cout<<"blasting "<<endl; int E=2*edges.size()*WD*LY; int kk=1; for(int i=0;i<1;i++) { *mark=0; initchan<< <(nodenum*LY/WORK_SIZE)+1, WORK_SIZE >> >(s,dev_chan,dev_d,dev_pred,nodenum); cudaMemcpy(dev_m,&mark, sizeof(int), cudaMemcpyHostToDevice); do{ cudaMemcpy(chan,dev_chan,nodenum*sizeof(int), cudaMemcpyDeviceToHost); int cc=0; BFShigh << <(E/WORK_SIZE)+1, WORK_SIZE >> >(t,dev_m,dev_st,dev_te,dev_d,dev_chan,kk,E,nodenum); chanchan<< <(nodenum*LY/WORK_SIZE)+1, WORK_SIZE >> >(dev_m,dev_pred,dev_d,dev_chan,nodenum); cudaMemcpy(mark, dev_m, sizeof(int), cudaMemcpyDeviceToHost); kk++; } while(*mark==0); cout<<"out here is !"<<endl; //cout<<"kk is: "<<kk<<endl; } cout<<"out routalg"<<endl; return vector<int>(); }; int fls(int x) { int position; int i; if(x!=0) for(i=(x>>1),position=0;i!=0;++position) i>>=1; else position=-1; return pow(2,position+1); }
e2518489eaff1c46226da04a1180ad25f0963114.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <vector> #include "random/rng.h" #include "test_utils.h" #include <cuda_utils.h> #include "ml_utils.h" #include "tsvd/tsvd.h" namespace ML { using namespace MLCommon; template<typename T> struct TsvdInputs { T tolerance; int len; int n_row; int n_col; int len2; int n_row2; int n_col2; unsigned long long int seed; int algo; }; template<typename T> ::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims) { return os; } template<typename T> class TsvdTest: public ::testing::TestWithParam<TsvdInputs<T> > { protected: void basicTest() { hipblasHandle_t cublas_handle; CUBLAS_CHECK(hipblasCreate(&cublas_handle)); hipsolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle)); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); Random::Rng r(params.seed, MLCommon::Random::GenTaps); int len = params.len; allocate(data, len); std::vector<T> data_h = { 1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0 }; data_h.resize(len); updateDevice(data, data_h.data(), len, stream); int len_comp = params.n_col * params.n_col; allocate(components, len_comp); allocate(singular_vals, params.n_col); std::vector<T> components_ref_h = { -0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 
0.7215, -0.3757 }; components_ref_h.resize(len_comp); allocate(components_ref, len_comp); updateDevice(components_ref, components_ref_h.data(), len_comp, stream); paramsTSVD prms; prms.n_cols = params.n_col; prms.n_rows = params.n_row; prms.n_components = params.n_col; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else prms.algorithm = solver::COV_EIG_JACOBI; tsvdFit(data, components, singular_vals, prms, cublas_handle, cusolver_handle, stream); CUBLAS_CHECK(hipblasDestroy(cublas_handle)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle)); CUDA_CHECK(hipStreamDestroy(stream)); } void advancedTest() { hipblasHandle_t cublas_handle; CUBLAS_CHECK(hipblasCreate(&cublas_handle)); hipsolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle)); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); Random::Rng r(params.seed, MLCommon::Random::GenTaps); int len = params.len2; paramsTSVD prms; prms.n_cols = params.n_col2; prms.n_rows = params.n_row2; prms.n_components = params.n_col2; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else if (params.algo == 1) prms.algorithm = solver::COV_EIG_JACOBI; else if (params.algo == 2) { prms.algorithm = solver::RANDOMIZED; prms.n_components = params.n_col2 - 15; } allocate(data2, len); r.uniform(data2, len, T(-1.0), T(1.0), stream); allocate(data2_trans, prms.n_rows * prms.n_components); int len_comp = params.n_col2 * prms.n_components; allocate(components2, len_comp); allocate(explained_vars2, prms.n_components); allocate(explained_var_ratio2, prms.n_components); allocate(singular_vals2, prms.n_components); tsvdFitTransform(data2, data2_trans, components2, explained_vars2, explained_var_ratio2, singular_vals2, prms, cublas_handle, cusolver_handle, stream); allocate(data2_back, len); tsvdInverseTransform(data2_trans, components2, data2_back, prms, cublas_handle, stream); 
CUBLAS_CHECK(hipblasDestroy(cublas_handle)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle)); CUDA_CHECK(hipStreamDestroy(stream)); } void SetUp() override { basicTest(); advancedTest(); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(components)); CUDA_CHECK(hipFree(singular_vals)); CUDA_CHECK(hipFree(components_ref)); CUDA_CHECK(hipFree(data2)); CUDA_CHECK(hipFree(data2_trans)); CUDA_CHECK(hipFree(data2_back)); CUDA_CHECK(hipFree(components2)); CUDA_CHECK(hipFree(explained_vars2)); CUDA_CHECK(hipFree(explained_var_ratio2)); CUDA_CHECK(hipFree(singular_vals2)); } protected: TsvdInputs<T> params; T *data, *components, *singular_vals, *components_ref, *explained_vars_ref; T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2, *singular_vals2; }; const std::vector<TsvdInputs<float> > inputsf2 = { { 0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0 }, { 0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1 }, { 0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }, { 0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }}; const std::vector<TsvdInputs<double> > inputsd2 = { { 0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0 }, { 0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1 }, { 0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }, { 0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }}; typedef TsvdTest<float> TsvdTestLeftVecF; TEST_P(TsvdTestLeftVecF, Result) { ASSERT_TRUE( devArrMatch(components, components_ref, (params.n_col * params.n_col), CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestLeftVecD; TEST_P(TsvdTestLeftVecD, Result) { ASSERT_TRUE( devArrMatch(components, components_ref, (params.n_col * params.n_col), CompareApproxAbs<double>(params.tolerance))); } typedef TsvdTest<float> TsvdTestDataVecF; TEST_P(TsvdTestDataVecF, Result) { ASSERT_TRUE( devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), CompareApproxAbs<float>(params.tolerance))); } 
typedef TsvdTest<double> TsvdTestDataVecD; TEST_P(TsvdTestDataVecD, Result) { ASSERT_TRUE( devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2)); } // end namespace ML
e2518489eaff1c46226da04a1180ad25f0963114.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <vector> #include "random/rng.h" #include "test_utils.h" #include <cuda_utils.h> #include "ml_utils.h" #include "tsvd/tsvd.h" namespace ML { using namespace MLCommon; template<typename T> struct TsvdInputs { T tolerance; int len; int n_row; int n_col; int len2; int n_row2; int n_col2; unsigned long long int seed; int algo; }; template<typename T> ::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims) { return os; } template<typename T> class TsvdTest: public ::testing::TestWithParam<TsvdInputs<T> > { protected: void basicTest() { cublasHandle_t cublas_handle; CUBLAS_CHECK(cublasCreate(&cublas_handle)); cusolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle)); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); Random::Rng r(params.seed, MLCommon::Random::GenTaps); int len = params.len; allocate(data, len); std::vector<T> data_h = { 1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0 }; data_h.resize(len); updateDevice(data, data_h.data(), len, stream); int len_comp = params.n_col * params.n_col; allocate(components, len_comp); allocate(singular_vals, params.n_col); std::vector<T> components_ref_h = { -0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 0.7215, -0.3757 }; components_ref_h.resize(len_comp); 
allocate(components_ref, len_comp); updateDevice(components_ref, components_ref_h.data(), len_comp, stream); paramsTSVD prms; prms.n_cols = params.n_col; prms.n_rows = params.n_row; prms.n_components = params.n_col; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else prms.algorithm = solver::COV_EIG_JACOBI; tsvdFit(data, components, singular_vals, prms, cublas_handle, cusolver_handle, stream); CUBLAS_CHECK(cublasDestroy(cublas_handle)); CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle)); CUDA_CHECK(cudaStreamDestroy(stream)); } void advancedTest() { cublasHandle_t cublas_handle; CUBLAS_CHECK(cublasCreate(&cublas_handle)); cusolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle)); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); Random::Rng r(params.seed, MLCommon::Random::GenTaps); int len = params.len2; paramsTSVD prms; prms.n_cols = params.n_col2; prms.n_rows = params.n_row2; prms.n_components = params.n_col2; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else if (params.algo == 1) prms.algorithm = solver::COV_EIG_JACOBI; else if (params.algo == 2) { prms.algorithm = solver::RANDOMIZED; prms.n_components = params.n_col2 - 15; } allocate(data2, len); r.uniform(data2, len, T(-1.0), T(1.0), stream); allocate(data2_trans, prms.n_rows * prms.n_components); int len_comp = params.n_col2 * prms.n_components; allocate(components2, len_comp); allocate(explained_vars2, prms.n_components); allocate(explained_var_ratio2, prms.n_components); allocate(singular_vals2, prms.n_components); tsvdFitTransform(data2, data2_trans, components2, explained_vars2, explained_var_ratio2, singular_vals2, prms, cublas_handle, cusolver_handle, stream); allocate(data2_back, len); tsvdInverseTransform(data2_trans, components2, data2_back, prms, cublas_handle, stream); CUBLAS_CHECK(cublasDestroy(cublas_handle)); CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle)); 
CUDA_CHECK(cudaStreamDestroy(stream)); } void SetUp() override { basicTest(); advancedTest(); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(components)); CUDA_CHECK(cudaFree(singular_vals)); CUDA_CHECK(cudaFree(components_ref)); CUDA_CHECK(cudaFree(data2)); CUDA_CHECK(cudaFree(data2_trans)); CUDA_CHECK(cudaFree(data2_back)); CUDA_CHECK(cudaFree(components2)); CUDA_CHECK(cudaFree(explained_vars2)); CUDA_CHECK(cudaFree(explained_var_ratio2)); CUDA_CHECK(cudaFree(singular_vals2)); } protected: TsvdInputs<T> params; T *data, *components, *singular_vals, *components_ref, *explained_vars_ref; T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2, *singular_vals2; }; const std::vector<TsvdInputs<float> > inputsf2 = { { 0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0 }, { 0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1 }, { 0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }, { 0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }}; const std::vector<TsvdInputs<double> > inputsd2 = { { 0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0 }, { 0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1 }, { 0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }, { 0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2 }}; typedef TsvdTest<float> TsvdTestLeftVecF; TEST_P(TsvdTestLeftVecF, Result) { ASSERT_TRUE( devArrMatch(components, components_ref, (params.n_col * params.n_col), CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestLeftVecD; TEST_P(TsvdTestLeftVecD, Result) { ASSERT_TRUE( devArrMatch(components, components_ref, (params.n_col * params.n_col), CompareApproxAbs<double>(params.tolerance))); } typedef TsvdTest<float> TsvdTestDataVecF; TEST_P(TsvdTestDataVecF, Result) { ASSERT_TRUE( devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestDataVecD; TEST_P(TsvdTestDataVecD, Result) { 
ASSERT_TRUE( devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2)); } // end namespace ML
3d123119c94a6c4383feab24fdc9895daeeec525.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of bp-layers. // // Copyright (C) 2020 Patrick Knbelreiter <knoebelreiter at icg dot tugraz dot at> // Christian Sormann <christian dot sormann at icg dot tugraz dot at> // Institute for Computer Graphics and Vision, Graz University of Technology // https://www.tugraz.at/institute/icg/teams/team-pock/ // // bp-layers is free software: you can redistribute it and/or modify it under the // terms of the GNU Affero General Public License as published by the Free Software // Foundation, either version 3 of the License, or any later version. // // bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. 
#include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "flow_mp_sad_kernel.cuh" #include "tensor.h" #include "error_util.h" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void flow_mp_sad_cuda_forward_kernel( KernelData f0, KernelData f1, int sws, KernelData cv_u, KernelData cv_v, KernelData u_star, KernelData v_star, int offset_u, int offset_v, int blockIdx_u, // necessary for argmin computation int blockIdx_v // same here ) { // parallelize over u, loop over v //const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; // const int u_idx = blockIdx.z * blockDim.z + threadIdx.z; const int u_idx = blockIdx.x * blockDim.x + threadIdx.x; const int x = blockIdx.z * blockDim.z + threadIdx.z; // shared memory for search-window matching costs extern __shared__ float sdata[]; // global defines unsigned short K = cv_u.size3; short sws_half = sws / 2; short u = u_idx - sws_half; //short sm_offset = blockDim.x * K * K * threadIdx.y + K * K * threadIdx.x; short sm_offset = blockDim.z * K * K * threadIdx.y + K * K * threadIdx.z; // check inside image for reference pixel int n = 0; if(x >= f0.size3 || y >= f0.size2 || u_idx >= K) return; // initialize all sws with constant value (initialize all v displacements for given u_idx) for(short v_idx = 0; v_idx < K; ++v_idx) { sdata[sm_offset + K * v_idx + u_idx] = 40.0; } __syncthreads(); // skip outside pixels // if(x + u < 0 || x + u >= f0.size3) // return; // I cannot return outside pixels directly, because I need all the treads for the min-computation // later!! 
if(x + offset_u + u >= 0 && x + offset_u + u < f0.size3) // check match inside { for(short v = -sws_half; v <= sws_half; ++v) { short v_idx = v + sws_half; // skip outside pixels (match-pixel) if(y + offset_v + v < 0 || y + offset_v + v >= f0.size2) continue; float sad = 0.0f; for(int c = 0; c < f0.size1; ++c) { sad += fabs(f0(n, c, y, x) - f1(n, c, y + offset_v + v, x + offset_u + u)); } // save result to shared mem sdata[sm_offset + K * v_idx + u_idx] = sad; //cv_all(n, y, x, v_idx, u_idx) = sad; } } __syncthreads(); // all u-threads must be ready here! // compute min-projection in shared memory // Note: u_idx is parallelized within the kernel! float min_v = 9999999.0; short argmin_v = 0; for(unsigned short v_idx = 0; v_idx < K; ++v_idx) { if(sdata[sm_offset + K * v_idx + u_idx] < min_v) { min_v = sdata[sm_offset + K * v_idx + u_idx]; argmin_v = v_idx; } } // update min only if the current block has a better min // if(min_v < cv_u(n, y, x, u_idx)) // for inplace variant which I do not have yet //{ cv_u(n, y, x, u_idx) = min_v; u_star(n, y, x, u_idx) = argmin_v + blockIdx_v * sws; // sws = K - 1 => default overlap //} // compute min-projection in shared memory // here I swap rules and use the u_idx as v_idx for easier parallelization float min_u = 9999999.0; short v_idx = u_idx; short argmin_u = 0; for(unsigned short u_idx = 0; u_idx < K; ++u_idx) { if(sdata[sm_offset + K * v_idx + u_idx] < min_u) { min_u = sdata[sm_offset + K * v_idx + u_idx]; argmin_u = u_idx; } } // update min only if the current block has a better min //if(min_u < cv_v(n, y, x, v_idx)) // for inplace variant which I do not have yet //{ cv_v(n, y, x, v_idx) = min_u; v_star(n, y, x, v_idx) = argmin_u + blockIdx_u * sws; // sws = K - 1 => default overlap //} } __global__ void flow_mp_sad_cuda_backward_kernel( KernelData f0, KernelData f1, int sws, KernelData in_grad_u, KernelData in_grad_v, KernelData u_star, KernelData v_star, KernelData df0, KernelData df1 ) { const unsigned int x = blockIdx.x 
* blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const unsigned int c = blockIdx.z * blockDim.z + threadIdx.z; float eps = 1e-15; // check inside image int n = 0; if(x >= f0.size3 || y >= f0.size2 || c >= f0.size1) return; int sws_half = sws / 2; float grad_f0 = 0.0f; float grad_f1 = 0.0f; for(short u = -sws_half; u <= sws_half; ++u) { short u_idx = u + sws_half; short v_idx = u_star(n, y, x, u_idx); short v = v_idx - sws_half; // skip outside pixels if(x + u >= 0 && x + u < f0.size3 && y + v >= 0 && y + v < f0.size2) { float diff = f0(n, c, y, x) - f1(n, c, y + v, x + u); if(fabsf(diff) > eps) // gradient is zero if diff is zero! { float update = diff / fabsf(diff) * in_grad_u(n, y, x, u_idx); // local update for df0 grad_f0 += update; // global update for df1 (multiple vars can point to one address!) atomicAdd(&df1(n, c, y + v, x + u), -update); } } } for(short v = -sws_half; v <= sws_half; ++v) { short v_idx = v + sws_half; short u_idx = v_star(n, y, x, v_idx); short u = u_idx - sws_half; // copied from above, only change is that here in_grad_v is used if(x + u >= 0 && x + u < f0.size3 && y + v >= 0 && y + v < f0.size2) { float diff = f0(n, c, y, x) - f1(n, c, y + v, x + u); if(fabsf(diff) > eps) // gradient is zero if diff is zero! { float update = diff / fabsf(diff) * in_grad_v(n, y, x, v_idx); // local update for df0 grad_f0 += update; // global update for df1 (multiple vars can point to one address!) 
atomicAdd(&df1(n, c, y + v, x + u), -update); } } } df0(n, c, y, x) = grad_f0; } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> flow_mp_sad_forward(at::Tensor f0, at::Tensor f1, int sws, int offset_u, int offset_v, int blockIdx_u, int blockIdx_v) { int N = f0.size(0); int C = f0.size(1); int H = f0.size(2); int W = f0.size(3); int K = sws + 1; auto cv_u = at::ones({N, H, W, K}, f0.options()) * 40; auto cv_v = at::ones({N, H, W, K}, f0.options()) * 40; auto u_star = at::zeros({N, H, W, K}, f0.options()); auto v_star = at::zeros({N, H, W, K}, f0.options()); //auto cv_all = at::ones({N, H, W, K, K}, f0.options()) * 40; if(K > 128) std::cout << "Error: Maximal search window size is " << K << " which is larger than max allowed 128!!" << std::endl; // parallelise over H x W x K // all K need to be in one block in order to have access to the same shared memory! // K needs to be the first, because last idx must be < 64. 
const dim3 blockSize(K, 1, 1); const dim3 numBlocks(::ceil(K / static_cast<float>(blockSize.x)), ::ceil(H / static_cast<float>(blockSize.y)), ::ceil(W / static_cast<float>(blockSize.z))); const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; // std::cout << "N=" << N << " C=" << C << " H=" << H << " W=" << W << " K=" << K << std::endl; // std::cout << "threadsPerBlock=" << threadsPerBlock << std::endl; // std::cout << "numBlocks.x=" << numBlocks.x << " .y=" << numBlocks.y << " .z=" << numBlocks.z << std::endl; // std::cout << "mem-use=" << threadsPerBlock*K*sizeof(float) << "bytes" << std::endl; //CudaTimer cut; //cut.start(); hipLaunchKernelGGL(( flow_mp_sad_cuda_forward_kernel), dim3(numBlocks), dim3(blockSize), threadsPerBlock*K*sizeof(float), 0, f0, f1, sws, cv_u, cv_v, u_star, v_star, offset_u, offset_v, blockIdx_u, blockIdx_v); cudaSafeCall(hipGetLastError()); // hipDeviceSynchronize(); //std::cout << "SAD forward time " << cut.elapsed() << std::endl; std::vector<at::Tensor> res; //cost_vols.push_back(cv_all); res.push_back(cv_u); res.push_back(cv_v); res.push_back(u_star); res.push_back(v_star); return res; } std::vector<at::Tensor> flow_mp_sad_backward(at::Tensor f0, at::Tensor f1, int sws, at::Tensor in_grad_u, at::Tensor in_grad_v, at::Tensor u_star, at::Tensor v_star) { int N = f0.size(0); int C = f0.size(1); int H = f0.size(2); int W = f0.size(3); int K = sws + 1; auto df0 = at::zeros_like(f0); auto df1 = at::zeros_like(f1); // parallelise over H x W x D const dim3 blockSize(8, 8, 4); const dim3 numBlocks(::ceil(W / static_cast<float>(blockSize.x)), ::ceil(H / static_cast<float>(blockSize.y)), ::ceil(C / static_cast<float>(blockSize.z))); //CudaTimer cut; //cut.start(); hipLaunchKernelGGL(( flow_mp_sad_cuda_backward_kernel), dim3(numBlocks), dim3(blockSize), 0, 0, f0, f1, sws, in_grad_u, in_grad_v, u_star, v_star, df0, df1); cudaSafeCall(hipGetLastError()); // hipDeviceSynchronize(); //std::cout << "SAD backward time " << cut.elapsed() << 
std::endl; std::vector<at::Tensor> gradients; gradients.push_back(df0); gradients.push_back(df1); return gradients; } }
3d123119c94a6c4383feab24fdc9895daeeec525.cu
// This file is part of bp-layers. // // Copyright (C) 2020 Patrick Knöbelreiter <knoebelreiter at icg dot tugraz dot at> // Christian Sormann <christian dot sormann at icg dot tugraz dot at> // Institute for Computer Graphics and Vision, Graz University of Technology // https://www.tugraz.at/institute/icg/teams/team-pock/ // // bp-layers is free software: you can redistribute it and/or modify it under the // terms of the GNU Affero General Public License as published by the Free Software // Foundation, either version 3 of the License, or any later version. // // bp-layers is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. #include <cuda.h> #include <cuda_runtime.h> #include "flow_mp_sad_kernel.cuh" #include "tensor.h" #include "error_util.h" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void flow_mp_sad_cuda_forward_kernel( KernelData f0, KernelData f1, int sws, KernelData cv_u, KernelData cv_v, KernelData u_star, KernelData v_star, int offset_u, int offset_v, int blockIdx_u, // necessary for argmin computation int blockIdx_v // same here ) { // parallelize over u, loop over v //const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; // const int u_idx = blockIdx.z * blockDim.z + threadIdx.z; const int u_idx = blockIdx.x * blockDim.x + threadIdx.x; const int x = blockIdx.z * blockDim.z + threadIdx.z; // shared memory for search-window matching costs extern __shared__ float sdata[]; // global defines unsigned short K = cv_u.size3; short sws_half = sws 
/ 2; short u = u_idx - sws_half; //short sm_offset = blockDim.x * K * K * threadIdx.y + K * K * threadIdx.x; short sm_offset = blockDim.z * K * K * threadIdx.y + K * K * threadIdx.z; // check inside image for reference pixel int n = 0; if(x >= f0.size3 || y >= f0.size2 || u_idx >= K) return; // initialize all sws with constant value (initialize all v displacements for given u_idx) for(short v_idx = 0; v_idx < K; ++v_idx) { sdata[sm_offset + K * v_idx + u_idx] = 40.0; } __syncthreads(); // skip outside pixels // if(x + u < 0 || x + u >= f0.size3) // return; // I cannot return outside pixels directly, because I need all the treads for the min-computation // later!! if(x + offset_u + u >= 0 && x + offset_u + u < f0.size3) // check match inside { for(short v = -sws_half; v <= sws_half; ++v) { short v_idx = v + sws_half; // skip outside pixels (match-pixel) if(y + offset_v + v < 0 || y + offset_v + v >= f0.size2) continue; float sad = 0.0f; for(int c = 0; c < f0.size1; ++c) { sad += fabs(f0(n, c, y, x) - f1(n, c, y + offset_v + v, x + offset_u + u)); } // save result to shared mem sdata[sm_offset + K * v_idx + u_idx] = sad; //cv_all(n, y, x, v_idx, u_idx) = sad; } } __syncthreads(); // all u-threads must be ready here! // compute min-projection in shared memory // Note: u_idx is parallelized within the kernel! 
float min_v = 9999999.0; short argmin_v = 0; for(unsigned short v_idx = 0; v_idx < K; ++v_idx) { if(sdata[sm_offset + K * v_idx + u_idx] < min_v) { min_v = sdata[sm_offset + K * v_idx + u_idx]; argmin_v = v_idx; } } // update min only if the current block has a better min // if(min_v < cv_u(n, y, x, u_idx)) // for inplace variant which I do not have yet //{ cv_u(n, y, x, u_idx) = min_v; u_star(n, y, x, u_idx) = argmin_v + blockIdx_v * sws; // sws = K - 1 => default overlap //} // compute min-projection in shared memory // here I swap rules and use the u_idx as v_idx for easier parallelization float min_u = 9999999.0; short v_idx = u_idx; short argmin_u = 0; for(unsigned short u_idx = 0; u_idx < K; ++u_idx) { if(sdata[sm_offset + K * v_idx + u_idx] < min_u) { min_u = sdata[sm_offset + K * v_idx + u_idx]; argmin_u = u_idx; } } // update min only if the current block has a better min //if(min_u < cv_v(n, y, x, v_idx)) // for inplace variant which I do not have yet //{ cv_v(n, y, x, v_idx) = min_u; v_star(n, y, x, v_idx) = argmin_u + blockIdx_u * sws; // sws = K - 1 => default overlap //} } __global__ void flow_mp_sad_cuda_backward_kernel( KernelData f0, KernelData f1, int sws, KernelData in_grad_u, KernelData in_grad_v, KernelData u_star, KernelData v_star, KernelData df0, KernelData df1 ) { const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const unsigned int c = blockIdx.z * blockDim.z + threadIdx.z; float eps = 1e-15; // check inside image int n = 0; if(x >= f0.size3 || y >= f0.size2 || c >= f0.size1) return; int sws_half = sws / 2; float grad_f0 = 0.0f; float grad_f1 = 0.0f; for(short u = -sws_half; u <= sws_half; ++u) { short u_idx = u + sws_half; short v_idx = u_star(n, y, x, u_idx); short v = v_idx - sws_half; // skip outside pixels if(x + u >= 0 && x + u < f0.size3 && y + v >= 0 && y + v < f0.size2) { float diff = f0(n, c, y, x) - f1(n, c, y + v, x + u); if(fabsf(diff) > eps) // gradient 
is zero if diff is zero! { float update = diff / fabsf(diff) * in_grad_u(n, y, x, u_idx); // local update for df0 grad_f0 += update; // global update for df1 (multiple vars can point to one address!) atomicAdd(&df1(n, c, y + v, x + u), -update); } } } for(short v = -sws_half; v <= sws_half; ++v) { short v_idx = v + sws_half; short u_idx = v_star(n, y, x, v_idx); short u = u_idx - sws_half; // copied from above, only change is that here in_grad_v is used if(x + u >= 0 && x + u < f0.size3 && y + v >= 0 && y + v < f0.size2) { float diff = f0(n, c, y, x) - f1(n, c, y + v, x + u); if(fabsf(diff) > eps) // gradient is zero if diff is zero! { float update = diff / fabsf(diff) * in_grad_v(n, y, x, v_idx); // local update for df0 grad_f0 += update; // global update for df1 (multiple vars can point to one address!) atomicAdd(&df1(n, c, y + v, x + u), -update); } } } df0(n, c, y, x) = grad_f0; } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> flow_mp_sad_forward(at::Tensor f0, at::Tensor f1, int sws, int offset_u, int offset_v, int blockIdx_u, int blockIdx_v) { int N = f0.size(0); int C = f0.size(1); int H = f0.size(2); int W = f0.size(3); int K = sws + 1; auto cv_u = at::ones({N, H, W, K}, f0.options()) * 40; auto cv_v = at::ones({N, H, W, K}, f0.options()) * 40; auto u_star = at::zeros({N, H, W, K}, f0.options()); auto v_star = at::zeros({N, H, W, K}, f0.options()); //auto cv_all = at::ones({N, H, W, K, K}, f0.options()) * 40; if(K > 128) std::cout << "Error: Maximal search window size is " << K << " which is larger than max allowed 128!!" << std::endl; // parallelise over H x W x K // all K need to be in one block in order to have access to the same shared memory! // K needs to be the first, because last idx must be < 64. 
const dim3 blockSize(K, 1, 1); const dim3 numBlocks(std::ceil(K / static_cast<float>(blockSize.x)), std::ceil(H / static_cast<float>(blockSize.y)), std::ceil(W / static_cast<float>(blockSize.z))); const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; // std::cout << "N=" << N << " C=" << C << " H=" << H << " W=" << W << " K=" << K << std::endl; // std::cout << "threadsPerBlock=" << threadsPerBlock << std::endl; // std::cout << "numBlocks.x=" << numBlocks.x << " .y=" << numBlocks.y << " .z=" << numBlocks.z << std::endl; // std::cout << "mem-use=" << threadsPerBlock*K*sizeof(float) << "bytes" << std::endl; //CudaTimer cut; //cut.start(); flow_mp_sad_cuda_forward_kernel<<<numBlocks, blockSize, threadsPerBlock*K*sizeof(float)>>>(f0, f1, sws, cv_u, cv_v, u_star, v_star, offset_u, offset_v, blockIdx_u, blockIdx_v); cudaSafeCall(cudaGetLastError()); // cudaDeviceSynchronize(); //std::cout << "SAD forward time " << cut.elapsed() << std::endl; std::vector<at::Tensor> res; //cost_vols.push_back(cv_all); res.push_back(cv_u); res.push_back(cv_v); res.push_back(u_star); res.push_back(v_star); return res; } std::vector<at::Tensor> flow_mp_sad_backward(at::Tensor f0, at::Tensor f1, int sws, at::Tensor in_grad_u, at::Tensor in_grad_v, at::Tensor u_star, at::Tensor v_star) { int N = f0.size(0); int C = f0.size(1); int H = f0.size(2); int W = f0.size(3); int K = sws + 1; auto df0 = at::zeros_like(f0); auto df1 = at::zeros_like(f1); // parallelise over H x W x D const dim3 blockSize(8, 8, 4); const dim3 numBlocks(std::ceil(W / static_cast<float>(blockSize.x)), std::ceil(H / static_cast<float>(blockSize.y)), std::ceil(C / static_cast<float>(blockSize.z))); //CudaTimer cut; //cut.start(); flow_mp_sad_cuda_backward_kernel<<<numBlocks, blockSize>>>(f0, f1, sws, in_grad_u, in_grad_v, u_star, v_star, df0, df1); cudaSafeCall(cudaGetLastError()); // cudaDeviceSynchronize(); //std::cout << "SAD backward time " << cut.elapsed() << std::endl; std::vector<at::Tensor> gradients; 
gradients.push_back(df0); gradients.push_back(df1); return gradients; } }
ab39764c9c9da81c000a310e96164b5e02576324.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <climits>   // BUGFIX: INT_MAX is used in main() but no limits header was included.
#include "../headers/graph.h"

#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_SHARED_MEM_PER_BLOCK 1024
#define S_MATRIX_SIZE 32

// Tags telling a kernel which recursive routine launched it, so blocks that
// were already updated by the parent stage can skip themselves.
#define AFW_CONST 1
#define BFW_CONST 2
#define CFW_CONST 3
#define DFW_CONST 4

using namespace std;

// Base-case kernel for AFW: classic Floyd-Warshall on one m x m tile.
// X[i][j] = min(X[i][j], U[i][k] + V[k][j]). Launched with a single block;
// each thread owns a (m/blockDim.x) x (m/blockDim.y) sub-tile, so m must be
// divisible by both block dimensions.
// BUGFIX (whole file): barriers were written "syncthreads()", which is not a
// declared symbol in HIP/CUDA; the intrinsic is "__syncthreads()".
__global__ void AloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m){
    int tx = threadIdx.x, ty = threadIdx.y, sum;
    int rowsPerThread = m / blockDim.x;
    int colsPerThread = m / blockDim.y;
    int r_offset_start = threadIdx.x * rowsPerThread;
    int r_offset_end = r_offset_start + rowsPerThread - 1;
    int c_offset_start = threadIdx.y * colsPerThread;
    int c_offset_end = c_offset_start + colsPerThread - 1;
    for(int k = 0; k < m; k++){
        if(tx == 0 && ty == 0){
            //update cell (k,k) first: it feeds both the row and column pass below
            sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
            d_x[x_row_st + k][x_col_st + k] = d_x[x_row_st + k][x_col_st + k] > sum ? sum : d_x[x_row_st + k][x_col_st + k];
        }
        __syncthreads();
        //Thread owning row k updates the current row.
        if(r_offset_start <= k && k <= r_offset_end){
            for(int j = c_offset_start; j <= c_offset_end; j++){
                if(j == k) continue;
                sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                if(d_x[x_row_st + k][x_col_st + j] > sum)
                    d_x[x_row_st + k][x_col_st + j] = sum;
            }
        }
        __syncthreads();
        //Thread owning column k updates the current column.
        if(c_offset_start <= k && k <= c_offset_end){
            for(int i = r_offset_start; i <= r_offset_end; i++){
                if(i == k) continue;
                sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
                if(d_x[x_row_st + i][x_col_st + k] > sum)
                    d_x[x_row_st + i][x_col_st + k] = sum;
            }
        }
        __syncthreads();
        //Relax every remaining cell of the tile against row k / column k.
        for(int i = r_offset_start; i <= r_offset_end; i++){
            if(i == k) continue;
            for(int j = c_offset_start; j <= c_offset_end; j++){
                if(j == k) continue;
                int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                if(d_x[x_row_st + i][x_col_st + j] > sum)
                    d_x[x_row_st + i][x_col_st + j] = sum;
            }
        }
        __syncthreads();
    }
}

// BFW base-case kernel: updates the k-th row of tiles. blockIdx.y selects the
// column tile; the pivot tile itself is skipped when the parent stage (AFW)
// already processed it. The skip condition is uniform across the block
// (depends only on blockIdx/parent), so the barriers inside the branch are safe.
__global__ void BloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    //Shift the start cells of x, u, v to this block's tile.
    x_row_st += submatrix_offset * m;
    x_col_st += blockIdx.y * m;
    u_row_st += submatrix_offset * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += blockIdx.y * m;
    if(!(blockIdx.y == submatrix_offset && parent == AFW_CONST)){
        int sum;
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k=0; k < m; k++){
            //Update kth row using the thread that owns it.
            if(r_offset_start <= k && k <= r_offset_end){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + k][x_col_st + j] > sum)
                        d_x[x_row_st + k][x_col_st + j] = sum;
                }
            }
            __syncthreads();
            //Update the other cells.
            for(int i = r_offset_start; i <= r_offset_end; i++){
                if(i == k) continue;
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }
    }
}

// CFW base-case kernel: updates the k-th column of tiles. blockIdx.x selects
// the row tile; the pivot tile is skipped when AFW already handled it.
__global__ void CloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    x_row_st += blockIdx.x * m;
    x_col_st += submatrix_offset * m;
    u_row_st += blockIdx.x * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += submatrix_offset * m;
    if(!(blockIdx.x == submatrix_offset && parent == AFW_CONST)){
        int sum;
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k=0; k < m; k++){
            //Update kth column using the thread that owns it.
            if(c_offset_start <= k && k <= c_offset_end){
                for(int i = r_offset_start; i <= r_offset_end; i++){
                    sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
                    if(d_x[x_row_st + i][x_col_st + k] > sum)
                        d_x[x_row_st + i][x_col_st + k] = sum;
                }
            }
            __syncthreads();
            //Update the other cells.
            for(int i = r_offset_start; i <= r_offset_end; i++){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    if(j == k) continue;
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }//outer k loop
    }
}

// DFW base-case kernel: relaxes every tile (blockIdx.x, blockIdx.y) against the
// pivot row/column tiles. Tiles already updated by the launching parent stage
// (A, B, or C) are skipped; the skip flags are block-uniform.
__global__ void DloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    x_row_st += blockIdx.x * m;
    x_col_st += blockIdx.y * m;
    u_row_st += blockIdx.x * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += blockIdx.y * m;
    //AFW_PARENT -> blockIdx.x != submatrix_offset && blockIdx.y != submatrix_offset
    //BFW_PARENT -> blockIdx.x != submatrix_offset
    //CFW_PARENT -> blockIdx.y != submatrix_offset
    int flag1 = parent == AFW_CONST && blockIdx.x == submatrix_offset && blockIdx.y == submatrix_offset;
    int flag2 = parent == BFW_CONST && blockIdx.x == submatrix_offset;
    int flag3 = parent == CFW_CONST && blockIdx.y == submatrix_offset;
    if(!(flag1 || flag2 || flag3)){
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k = 0; k < m; k++){
            for(int i = r_offset_start; i <= r_offset_end; i++){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }//outer for.
    }
}

// Recursive D stage: X, U, V are three disjoint n x n regions. Either launches
// the DloopFW kernel over an r x r grid of tiles (base case) or recurses one
// tiling level deeper. tilesize[depth] is the branching factor at this level.
void DFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR DFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        DloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update all submatrices with Dloop
                dim3 blocksPerGrid_D(r, r);
                hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, DFW_CONST);
                hipDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int i = 0; i < r; i++){
                    for(int j = 0; j < r; j++){
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
                hipDeviceSynchronize();
            }//outer k loop
        }
    }
}

// Recursive C stage: updates the column of tiles containing the pivot.
// For each k, first the pivot-column tiles (CloopFW/CFW), then the rest (Dloop/DFW).
void CFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR CFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        CloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update kth col with Cloop
                dim3 blocksPerGrid_C(r, 1);
                hipLaunchKernelGGL(( CloopFW), dim3(blocksPerGrid_C), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST);
                hipDeviceSynchronize();
                //Update remaining cells with Dloop
                dim3 blocksPerGrid_D(r, r);
                hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST);
                hipDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int i = 0; i < r; i++){
                    CFW(d_x, x_row_st + i*sub_size, x_col_st + offset, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    for(int j = 0; j < r; j++){
                        if(j == k) continue;
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer k loop
        }
    }
}

// Recursive B stage: updates the row of tiles containing the pivot.
// For each k, first the pivot-row tiles (BloopFW/BFW), then the rest (Dloop/DFW).
void BFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR BFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        BloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update kth row with Bloop
                dim3 blocksPerGrid_B(1, r);
                hipLaunchKernelGGL(( BloopFW), dim3(blocksPerGrid_B), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST);
                hipDeviceSynchronize();
                //Update remaining cells with Dloop
                dim3 blocksPerGrid_D(r, r);
                hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST);
                hipDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int j = 0; j < r; j++){
                    BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    if(i == k) continue;
                    for(int j = 0; j < r; j++){
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer k loop
        }
    }
}

//Figure 4 implementation : HW 5
// Top-level recursive Floyd-Warshall: for each pivot tile k, run A on the
// pivot, B on its row, C on its column, then D everywhere else.
void AFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR AFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        hipLaunchKernelGGL(( AloopFW), dim3(1), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth+1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                int offset = k * sub_size;
                hipLaunchKernelGGL(( AloopFW), dim3(1), dim3(threadsPerBlock), 0, 0, d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size);
                hipDeviceSynchronize();
                //Update kth row submatrices and kth col submatrices in parallel
                dim3 blocksPerGrid_B(1, r);
                hipLaunchKernelGGL(( BloopFW), dim3(blocksPerGrid_B), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                hipDeviceSynchronize();
                dim3 blocksPerGrid_C(r, 1);
                hipLaunchKernelGGL(( CloopFW), dim3(blocksPerGrid_C), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                hipDeviceSynchronize();
                //update remaining submatrices
                dim3 blocksPerGrid_D(r, r);
                hipLaunchKernelGGL(( DloopFW), dim3(blocksPerGrid_D), dim3(threadsPerBlock), 0, 0, d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                hipDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                for(int j = 0; j < r; j++){
                    if(j == k) continue;
                    BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    if(i == k) continue;
                    for(int j = 0; j < r; j++){
                        if(j == k) continue;
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer for
        }//else
    }
}//AFW

// Copies a 1-indexed (n+1)x(n+1) matrix from the device back to the host.
// dev_matrix is a device array of device row pointers, so each row pointer is
// fetched first, then the row data. Row/index 0 is unused by convention.
int ** copy_matrix_to_host(int ** dev_matrix, int n){
    int ** new_matrix = new int*[n+1];
    for(int i=1;i <= n; i++){
        new_matrix[i] = new int[n+1];
        int * begin;
        hipMemcpy(&begin, &dev_matrix[i], sizeof (int *), hipMemcpyDeviceToHost);
        hipMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), hipMemcpyDeviceToHost);
    }
    return new_matrix;
}

// Uploads a 1-indexed host matrix to the device as an array of row pointers.
// Returns NULL on allocation failure. NOTE(review): rows are never freed;
// acceptable for this one-shot benchmark but a leak if called repeatedly.
int ** copy_matrix_to_device(int ** host_matrix, int n){
    int ** dev_matrix;
    hipError_t err = hipMalloc(&dev_matrix, (n+1) * sizeof(int *));
    if(err != hipSuccess){
        printf("Error allocating memory on device.");
        return NULL;
    }
    for(int i = 1; i <= n; i++){
        int * start;
        err = hipMalloc(&start, (n+1)*sizeof(int));
        if(err != hipSuccess){
            printf("Error allocating memory on device.");
            return NULL;
        }
        //Store the device row pointer into the device-side pointer table.
        hipMemcpy(dev_matrix+i, &start, sizeof(int *), hipMemcpyHostToDevice);
        hipMemcpy(start, host_matrix[i], (n+1) * sizeof(int), hipMemcpyHostToDevice);
    }
    return dev_matrix;
}

// Reference serial Floyd-Warshall over the 1-indexed matrix (in place).
void fw_iterative_serial(int ** matrix, int n){
    int i,j,k = 0;
    for(k = 1; k <= n; k++){
        for(i = 1; i <= n; i++){
            for(j = 1; j <= n; j++){
                if(matrix[i][j] > matrix[i][k] + matrix[k][j])
                    matrix[i][j] = matrix[i][k] + matrix[k][j];
            }
        }
    }
}//end of iterative

// Runs the serial reference on `orig` (destructive) and compares with the GPU
// result. Returns 1 on match, 0 otherwise.
int compare(int ** orig, int ** new_matrix, int n){
    fw_iterative_serial(orig, n);
    for(int i=1; i <= n; i++){
        for(int j=1; j <= n; j++){
            if(orig[i][j] != new_matrix[i][j])
                return 0;
        }
    }
    return 1;
}

int main(int argc, char * argv[])
{
    //BUGFIX: argv[1] was read unconditionally; missing argument was undefined behaviour.
    if(argc < 2){
        printf("Usage: %s <matrix_size>\n", argv[0]);
        return 1;
    }
    //Matrix
    int n = atoi(argv[1]);
    int ** matrix = generate_matrix(n);
    int ** dev_matrix = copy_matrix_to_device(matrix, n);
    if(dev_matrix == NULL)
        return 0;
    long long start, end;
    start = clock();
    int tilesize[2] = {2, INT_MAX};
    //int tilesize[3] = {2, n/32, INT_MAX};
    AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize);
    end = clock();
    if(n <= 32){
        int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
        printf("\nWith updated distances: \n");
        print_matrix(new_matrix, n);
    }
    if(n <= 1024){
        int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
        int ans = compare(matrix, new_matrix, n);
        if(ans) printf("ANSWER: CORRECT\n");
        else printf("ANSWER: WRONG\n");
    }
    cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl;
    return 0;
}
ab39764c9c9da81c000a310e96164b5e02576324.cu
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <climits>   // BUGFIX: INT_MAX is used in main() but no limits header was included.
#include "../headers/graph.h"

#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_SHARED_MEM_PER_BLOCK 1024
#define S_MATRIX_SIZE 32

// Tags telling a kernel which recursive routine launched it, so blocks that
// were already updated by the parent stage can skip themselves.
#define AFW_CONST 1
#define BFW_CONST 2
#define CFW_CONST 3
#define DFW_CONST 4

using namespace std;

// Base-case kernel for AFW: classic Floyd-Warshall on one m x m tile.
// X[i][j] = min(X[i][j], U[i][k] + V[k][j]). Launched with a single block;
// each thread owns a (m/blockDim.x) x (m/blockDim.y) sub-tile, so m must be
// divisible by both block dimensions.
// BUGFIX (whole file): barriers were written "syncthreads()", which is not a
// declared symbol in CUDA; the intrinsic is "__syncthreads()".
__global__ void AloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m){
    int tx = threadIdx.x, ty = threadIdx.y, sum;
    int rowsPerThread = m / blockDim.x;
    int colsPerThread = m / blockDim.y;
    int r_offset_start = threadIdx.x * rowsPerThread;
    int r_offset_end = r_offset_start + rowsPerThread - 1;
    int c_offset_start = threadIdx.y * colsPerThread;
    int c_offset_end = c_offset_start + colsPerThread - 1;
    for(int k = 0; k < m; k++){
        if(tx == 0 && ty == 0){
            //update cell (k,k) first: it feeds both the row and column pass below
            sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
            d_x[x_row_st + k][x_col_st + k] = d_x[x_row_st + k][x_col_st + k] > sum ? sum : d_x[x_row_st + k][x_col_st + k];
        }
        __syncthreads();
        //Thread owning row k updates the current row.
        if(r_offset_start <= k && k <= r_offset_end){
            for(int j = c_offset_start; j <= c_offset_end; j++){
                if(j == k) continue;
                sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                if(d_x[x_row_st + k][x_col_st + j] > sum)
                    d_x[x_row_st + k][x_col_st + j] = sum;
            }
        }
        __syncthreads();
        //Thread owning column k updates the current column.
        if(c_offset_start <= k && k <= c_offset_end){
            for(int i = r_offset_start; i <= r_offset_end; i++){
                if(i == k) continue;
                sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
                if(d_x[x_row_st + i][x_col_st + k] > sum)
                    d_x[x_row_st + i][x_col_st + k] = sum;
            }
        }
        __syncthreads();
        //Relax every remaining cell of the tile against row k / column k.
        for(int i = r_offset_start; i <= r_offset_end; i++){
            if(i == k) continue;
            for(int j = c_offset_start; j <= c_offset_end; j++){
                if(j == k) continue;
                int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                if(d_x[x_row_st + i][x_col_st + j] > sum)
                    d_x[x_row_st + i][x_col_st + j] = sum;
            }
        }
        __syncthreads();
    }
}

// BFW base-case kernel: updates the k-th row of tiles. blockIdx.y selects the
// column tile; the pivot tile itself is skipped when the parent stage (AFW)
// already processed it. The skip condition is uniform across the block
// (depends only on blockIdx/parent), so the barriers inside the branch are safe.
__global__ void BloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    //Shift the start cells of x, u, v to this block's tile.
    x_row_st += submatrix_offset * m;
    x_col_st += blockIdx.y * m;
    u_row_st += submatrix_offset * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += blockIdx.y * m;
    if(!(blockIdx.y == submatrix_offset && parent == AFW_CONST)){
        int sum;
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k=0; k < m; k++){
            //Update kth row using the thread that owns it.
            if(r_offset_start <= k && k <= r_offset_end){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    sum = d_x[u_row_st + k][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + k][x_col_st + j] > sum)
                        d_x[x_row_st + k][x_col_st + j] = sum;
                }
            }
            __syncthreads();
            //Update the other cells.
            for(int i = r_offset_start; i <= r_offset_end; i++){
                if(i == k) continue;
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }
    }
}

// CFW base-case kernel: updates the k-th column of tiles. blockIdx.x selects
// the row tile; the pivot tile is skipped when AFW already handled it.
__global__ void CloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    x_row_st += blockIdx.x * m;
    x_col_st += submatrix_offset * m;
    u_row_st += blockIdx.x * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += submatrix_offset * m;
    if(!(blockIdx.x == submatrix_offset && parent == AFW_CONST)){
        int sum;
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k=0; k < m; k++){
            //Update kth column using the thread that owns it.
            if(c_offset_start <= k && k <= c_offset_end){
                for(int i = r_offset_start; i <= r_offset_end; i++){
                    sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + k];
                    if(d_x[x_row_st + i][x_col_st + k] > sum)
                        d_x[x_row_st + i][x_col_st + k] = sum;
                }
            }
            __syncthreads();
            //Update the other cells.
            for(int i = r_offset_start; i <= r_offset_end; i++){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    if(j == k) continue;
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }//outer k loop
    }
}

// DFW base-case kernel: relaxes every tile (blockIdx.x, blockIdx.y) against the
// pivot row/column tiles. Tiles already updated by the launching parent stage
// (A, B, or C) are skipped; the skip flags are block-uniform.
__global__ void DloopFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int m, int submatrix_offset, int parent){
    x_row_st += blockIdx.x * m;
    x_col_st += blockIdx.y * m;
    u_row_st += blockIdx.x * m;
    u_col_st += submatrix_offset * m;
    v_row_st += submatrix_offset * m;
    v_col_st += blockIdx.y * m;
    //AFW_PARENT -> blockIdx.x != submatrix_offset && blockIdx.y != submatrix_offset
    //BFW_PARENT -> blockIdx.x != submatrix_offset
    //CFW_PARENT -> blockIdx.y != submatrix_offset
    int flag1 = parent == AFW_CONST && blockIdx.x == submatrix_offset && blockIdx.y == submatrix_offset;
    int flag2 = parent == BFW_CONST && blockIdx.x == submatrix_offset;
    int flag3 = parent == CFW_CONST && blockIdx.y == submatrix_offset;
    if(!(flag1 || flag2 || flag3)){
        int rowsPerThread = m / blockDim.x;
        int colsPerThread = m / blockDim.y;
        int r_offset_start = threadIdx.x * rowsPerThread;
        int r_offset_end = r_offset_start + rowsPerThread - 1;
        int c_offset_start = threadIdx.y * colsPerThread;
        int c_offset_end = c_offset_start + colsPerThread - 1;
        for(int k = 0; k < m; k++){
            for(int i = r_offset_start; i <= r_offset_end; i++){
                for(int j = c_offset_start; j <= c_offset_end; j++){
                    int sum = d_x[u_row_st + i][u_col_st + k] + d_x[v_row_st + k][v_col_st + j];
                    if(d_x[x_row_st + i][x_col_st + j] > sum)
                        d_x[x_row_st + i][x_col_st + j] = sum;
                }
            }
            __syncthreads();
        }//outer for.
    }
}

// Recursive D stage: X, U, V are three disjoint n x n regions. Either launches
// the DloopFW kernel over an r x r grid of tiles (base case) or recurses one
// tiling level deeper. tilesize[depth] is the branching factor at this level.
void DFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR DFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        DloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update all submatrices with Dloop
                dim3 blocksPerGrid_D(r, r);
                DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, DFW_CONST);
                cudaDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int i = 0; i < r; i++){
                    for(int j = 0; j < r; j++){
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
                cudaDeviceSynchronize();
            }//outer k loop
        }
    }
}

// Recursive C stage: updates the column of tiles containing the pivot.
// For each k, first the pivot-column tiles (CloopFW/CFW), then the rest (Dloop/DFW).
void CFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR CFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        CloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update kth col with Cloop
                dim3 blocksPerGrid_C(r, 1);
                CloopFW<<<blocksPerGrid_C, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST);
                cudaDeviceSynchronize();
                //Update remaining cells with Dloop
                dim3 blocksPerGrid_D(r, r);
                DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, CFW_CONST);
                cudaDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int i = 0; i < r; i++){
                    CFW(d_x, x_row_st + i*sub_size, x_col_st + offset, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    for(int j = 0; j < r; j++){
                        if(j == k) continue;
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer k loop
        }
    }
}

// Recursive B stage: updates the row of tiles containing the pivot.
// For each k, first the pivot-row tiles (BloopFW/BFW), then the rest (Dloop/DFW).
void BFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR BFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        BloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth + 1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                //Update kth row with Bloop
                dim3 blocksPerGrid_B(1, r);
                BloopFW<<<blocksPerGrid_B, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST);
                cudaDeviceSynchronize();
                //Update remaining cells with Dloop
                dim3 blocksPerGrid_D(r, r);
                DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, BFW_CONST);
                cudaDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                for(int j = 0; j < r; j++){
                    BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    if(i == k) continue;
                    for(int j = 0; j < r; j++){
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer k loop
        }
    }
}

//Figure 4 implementation : HW 5
// Top-level recursive Floyd-Warshall: for each pivot tile k, run A on the
// pivot, B on its row, C on its column, then D everywhere else.
void AFW(int ** d_x, int x_row_st, int x_col_st, int u_row_st, int u_col_st, int v_row_st, int v_col_st, int n, int depth, int * tilesize){
    int r = tilesize[depth];
    if(r > n){
        printf("ERR AFW: Shouldn't reach here.\n");
        /*
        int threadX = min(n, THREADS_PER_BLOCK_X);
        int threadY = min(n, THREADS_PER_BLOCK_Y);
        dim3 threadsPerBlock(threadX, threadY);
        //Execute base case
        AloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, n);
        */
    }
    else{
        int sub_size = n / r;
        if(sub_size < tilesize[depth+1]){
            int threadX = min(sub_size, THREADS_PER_BLOCK_X);
            int threadY = min(sub_size, THREADS_PER_BLOCK_Y);
            dim3 threadsPerBlock(threadX, threadY);
            for(int k = 0; k < r; k++){
                int offset = k * sub_size;
                AloopFW<<<1, threadsPerBlock>>>(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size);
                cudaDeviceSynchronize();
                //Update kth row submatrices and kth col submatrices in parallel
                dim3 blocksPerGrid_B(1, r);
                BloopFW<<<blocksPerGrid_B, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                cudaDeviceSynchronize();
                dim3 blocksPerGrid_C(r, 1);
                CloopFW<<<blocksPerGrid_C, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                cudaDeviceSynchronize();
                //update remaining submatrices
                dim3 blocksPerGrid_D(r, r);
                DloopFW<<<blocksPerGrid_D, threadsPerBlock>>>(d_x, x_row_st, x_col_st, u_row_st, u_col_st, v_row_st, v_col_st, sub_size, k, AFW_CONST);
                cudaDeviceSynchronize();
            }
        }
        else{
            for(int k = 0; k < r; k++){
                int offset = k*sub_size;
                AFW(d_x, x_row_st + offset, x_col_st + offset, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                for(int j = 0; j < r; j++){
                    if(j == k) continue;
                    BFW(d_x, x_row_st + offset, x_col_st + j*sub_size, u_row_st + offset, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    CFW(d_x, x_row_st + j*sub_size, x_col_st + offset, u_row_st + j*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + offset, sub_size, depth+1, tilesize);
                }
                for(int i = 0; i < r; i++){
                    if(i == k) continue;
                    for(int j = 0; j < r; j++){
                        if(j == k) continue;
                        DFW(d_x, x_row_st + i*sub_size, x_col_st + j*sub_size, u_row_st + i*sub_size, u_col_st + offset, v_row_st + offset, v_col_st + j*sub_size, sub_size, depth+1, tilesize);
                    }
                }
            }//outer for
        }//else
    }
}//AFW

// Copies a 1-indexed (n+1)x(n+1) matrix from the device back to the host.
// dev_matrix is a device array of device row pointers, so each row pointer is
// fetched first, then the row data. Row/index 0 is unused by convention.
int ** copy_matrix_to_host(int ** dev_matrix, int n){
    int ** new_matrix = new int*[n+1];
    for(int i=1;i <= n; i++){
        new_matrix[i] = new int[n+1];
        int * begin;
        cudaMemcpy(&begin, &dev_matrix[i], sizeof (int *), cudaMemcpyDeviceToHost);
        cudaMemcpy(new_matrix[i], begin, (n+1) * sizeof(int), cudaMemcpyDeviceToHost);
    }
    return new_matrix;
}

// Uploads a 1-indexed host matrix to the device as an array of row pointers.
// Returns NULL on allocation failure. NOTE(review): rows are never freed;
// acceptable for this one-shot benchmark but a leak if called repeatedly.
int ** copy_matrix_to_device(int ** host_matrix, int n){
    int ** dev_matrix;
    cudaError_t err = cudaMalloc(&dev_matrix, (n+1) * sizeof(int *));
    if(err != cudaSuccess){
        printf("Error allocating memory on device.");
        return NULL;
    }
    for(int i = 1; i <= n; i++){
        int * start;
        err = cudaMalloc(&start, (n+1)*sizeof(int));
        if(err != cudaSuccess){
            printf("Error allocating memory on device.");
            return NULL;
        }
        //Store the device row pointer into the device-side pointer table.
        cudaMemcpy(dev_matrix+i, &start, sizeof(int *), cudaMemcpyHostToDevice);
        cudaMemcpy(start, host_matrix[i], (n+1) * sizeof(int), cudaMemcpyHostToDevice);
    }
    return dev_matrix;
}

// Reference serial Floyd-Warshall over the 1-indexed matrix (in place).
void fw_iterative_serial(int ** matrix, int n){
    int i,j,k = 0;
    for(k = 1; k <= n; k++){
        for(i = 1; i <= n; i++){
            for(j = 1; j <= n; j++){
                if(matrix[i][j] > matrix[i][k] + matrix[k][j])
                    matrix[i][j] = matrix[i][k] + matrix[k][j];
            }
        }
    }
}//end of iterative

// Runs the serial reference on `orig` (destructive) and compares with the GPU
// result. Returns 1 on match, 0 otherwise.
int compare(int ** orig, int ** new_matrix, int n){
    fw_iterative_serial(orig, n);
    for(int i=1; i <= n; i++){
        for(int j=1; j <= n; j++){
            if(orig[i][j] != new_matrix[i][j])
                return 0;
        }
    }
    return 1;
}

int main(int argc, char * argv[])
{
    //BUGFIX: argv[1] was read unconditionally; missing argument was undefined behaviour.
    if(argc < 2){
        printf("Usage: %s <matrix_size>\n", argv[0]);
        return 1;
    }
    //Matrix
    int n = atoi(argv[1]);
    int ** matrix = generate_matrix(n);
    int ** dev_matrix = copy_matrix_to_device(matrix, n);
    if(dev_matrix == NULL)
        return 0;
    long long start, end;
    start = clock();
    int tilesize[2] = {2, INT_MAX};
    //int tilesize[3] = {2, n/32, INT_MAX};
    AFW(dev_matrix, 1, 1, 1, 1, 1, 1, n, 0, tilesize);
    end = clock();
    if(n <= 32){
        int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
        printf("\nWith updated distances: \n");
        print_matrix(new_matrix, n);
    }
    if(n <= 1024){
        int ** new_matrix = copy_matrix_to_host(dev_matrix, n);
        int ans = compare(matrix, new_matrix, n);
        if(ans) printf("ANSWER: CORRECT\n");
        else printf("ANSWER: WRONG\n");
    }
    cout << "Runtime: " << double(end-start)/double(CLOCKS_PER_SEC) << endl;
    return 0;
}
601d67d540b30cdfc95ddc18e03210b61caea17b.hip
// !!! This is a file automatically generated by hipify!!! /** Flattening strategy for Euler - Atomic stages. COMPILE: nvcc EulerArray.cu -o ./bin/EulerArray -gencode arch=compute_35,code=sm_35 -lm -restrict -Xptxas=-v */ #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include "myVectorTypes.h" #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> // This file uses vector types to hold the dependent variables so fundamental operations on those types are defined as macros to accommodate different data types. Also, keeping types consistent for common constants (0, 1, 2, etc) used in computation has an appreciable positive effect on performance. #ifndef GPUNUM #define GPUNUM 0 #endif // We're just going to assume doubles #define REAL double #define REALtwo double2 #define REALthree double3 #define ZERO 0.0 #define QUARTER 0.25 #define HALF 0.5 #define ONE 1.0 #define TWO 2.0 #define SQUAREROOT(x) sqrt(x) // Hardwire in the length of the const REAL lx = 1.0; // The structure to carry the initial and boundary conditions. // 0 is left 1 is right. REALthree bd[2]; //dbd is the boundary condition in device constant memory. __constant__ REALthree dbd[2]; //Protoype for useful information struct. struct dimensions { REAL gam; // Heat capacity ratio REAL mgam; // 1- Heat capacity ratio REAL dt_dx; // deltat/deltax int base; // Length of node + stencils at end (4) int idxend; // Last index (number of spatial points - 1) int idxend_1; // Num spatial points - 2 int hts[5]; // The five point stencil around base/2 }; // structure of dimensions in cpu memory dimensions dimz; // Useful and efficient to keep the important constant information in GPU constant memory. 
__constant__ dimensions dimens; __host__ __device__ __forceinline__ void readIn(REALthree *temp, const REALthree *rights, const REALthree *lefts, int td, int gd) { // The index in the SHARED memory working array to place the corresponding member of right or left. #ifdef __CUDA_ARCH__ // Accesses the correct structure in constant memory. int leftidx = dimens.hts[4] + (((td>>2) & 1) * dimens.base) + (td & 3) - (4 + ((td>>2)<<1)); int rightidx = dimens.hts[4] + (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3); #else int leftidx = dimz.hts[4] + (((td>>2) & 1) * dimz.base) + (td & 3) - (4 + ((td>>2)<<1)); int rightidx = dimz.hts[4] + (((td>>2) & 1) * dimz.base) + ((td>>2)<<1) + (td & 3); #endif temp[leftidx] = rights[gd]; temp[rightidx] = lefts[gd]; } __device__ __forceinline__ void writeOutRight(REALthree *temp, REALthree *rights, REALthree *lefts, int td, int gd, int bd) { int gdskew = (gd + bd) & dimens.idxend; //The offset for the right array. int leftidx = (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimens.base-6) + (((td>>2) & 1) * dimens.base) + (td & 3) - ((td>>2)<<1); rights[gdskew] = temp[rightidx]; lefts[gd] = temp[leftidx]; } __host__ __device__ __forceinline__ void writeOutLeft(REALthree *temp, REALthree *rights, REALthree *lefts, int td, int gd, int bd) { #ifdef __CUDA_ARCH__ int gdskew = (gd - bd) & dimens.idxend; //The offset for the right array. 
int leftidx = (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimens.base-6) + (((td>>2) & 1) * dimens.base) + (td & 3) - ((td>>2)<<1); #else int gdskew = gd; int leftidx = (((td>>2) & 1) * dimz.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimz.base-6) + (((td>>2) & 1) * dimz.base) + (td & 3) - ((td>>2)<<1); #endif rights[gd] = temp[rightidx]; lefts[gdskew] = temp[leftidx]; } __device__ __host__ __forceinline__ REAL pressure(REALthree current) { #ifdef __CUDA_ARCH__ return dimens.mgam * (current.z - (HALF * current.y * current.y/current.x)); #else return dimz.mgam * (current.z - (HALF * current.y * current.y/current.x)); #endif } __device__ __host__ __forceinline__ REAL pressureHalf(REALthree current) { #ifdef __CUDA_ARCH__ return dimens.mgam * (current.z - HALF * current.y * current.y); #else return dimz.mgam * (current.z - HALF * current.y * current.y); #endif } __device__ __host__ __forceinline__ REALthree limitor(REALthree cvCurrent, REALthree cvOther, REAL pRatio) { return (cvCurrent + HALF * min(pRatio,ONE) * (cvOther - cvCurrent)); } __device__ __host__ __forceinline__ REALthree eulerFlux(REALthree cvLeft, REALthree cvRight) { #ifndef __CUDA_ARCH__ using namespace std; #endif REAL uLeft = cvLeft.y/cvLeft.x; REAL uRight = cvRight.y/cvRight.x; REAL pL = pressure(cvLeft); REAL pR = pressure(cvRight); REALthree flux; flux.x = (cvLeft.y + cvRight.y); flux.y = (cvLeft.y*uLeft + cvRight.y*uRight + pL + pR); flux.z = (cvLeft.z*uLeft + cvRight.z*uRight + uLeft*pL + uRight*pR); return flux; } __device__ __host__ __forceinline__ REALthree eulerSpectral(REALthree cvLeft, REALthree cvRight) { #ifndef __CUDA_ARCH__ using namespace std; #endif REALthree halfState; REAL rhoLeftsqrt = SQUAREROOT(cvLeft.x); REAL rhoRightsqrt = SQUAREROOT(cvRight.x); halfState.x = rhoLeftsqrt * rhoRightsqrt; REAL halfDenom = ONE/(halfState.x*(rhoLeftsqrt + rhoRightsqrt)); halfState.y = (rhoLeftsqrt*cvRight.y + rhoRightsqrt*cvLeft.y)*halfDenom; halfState.z = 
(rhoLeftsqrt*cvRight.z + rhoRightsqrt*cvLeft.z)*halfDenom; REAL pH = pressureHalf(halfState); #ifdef __CUDA_ARCH__ return (SQUAREROOT(pH*dimens.gam) + fabs(halfState.y)) * (cvLeft - cvRight); #else return (SQUAREROOT(pH*dimz.gam) + fabs(halfState.y)) * (cvLeft - cvRight); #endif } __device__ __host__ REALthree eulerStutterStep(REALthree *state, int tr, char flagLeft, char flagRight) { //P1-P0 REAL pLL = (flagLeft) ? ZERO : (TWO * state[tr-1].x * state[tr-2].x * (state[tr-1].z - state[tr-2].z) + (state[tr-2].y * state[tr-2].y* state[tr-1].x - state[tr-1].y * state[tr-1].y * state[tr-2].x)) ; //P2-P1 REAL pL = (TWO * state[tr].x *state[tr-1].x * (state[tr].z - state[tr-1].z) + (state[tr-1].y * state[tr-1].y * state[tr].x - state[tr].y * state[tr].y * state[tr-1].x)); //P3-P2 REAL pR = (TWO * state[tr].x * state[tr+1].x * (state[tr+1].z - state[tr].z) + (state[tr].y * state[tr].y * state[tr+1].x - state[tr+1].y * state[tr+1].y * state[tr].x)); //P4-P3 REAL pRR = (flagRight) ? ZERO : (TWO * state[tr+1].x * state[tr+2].x * (state[tr+2].z - state[tr+1].z) + (state[tr+1].y * state[tr+1].y * state[tr+2].x - state[tr+2].y * state[tr+2].y * state[tr+1].x)); //This is the temporary state bounded by the limitor function. //Pr0 = PL/PLL*rho0/rho2 Pr0 is not -, 0, or nan. REALthree tempStateLeft = (!pLL || !pL || (pLL < 0 != pL <0)) ? state[tr-1] : limitor(state[tr-1], state[tr], (state[tr-2].x*pL/(state[tr].x*pLL))); //Pr1 = PR/PL*rho1/rho3 Pr1 is not - or nan, pass Pr1^-1. REALthree tempStateRight = (!pL || !pR || (pL < 0 != pR <0)) ? state[tr] : limitor(state[tr], state[tr-1], (state[tr+1].x*pL/(state[tr-1].x*pR))); //Pressure needs to be recalculated for the new limited state variables. REALthree flux = eulerFlux(tempStateLeft,tempStateRight); flux += eulerSpectral(tempStateLeft,tempStateRight); //Do the same thing with the right side. //Pr1 = PR/PL*rho1/rho3 Pr1 is not - or nan. tempStateLeft = (!pL || !pR || (pL < 0 != pR <0)) ? 
state[tr] : limitor(state[tr], state[tr+1], (state[tr-1].x*pR/(state[tr+1].x*pL))); //Pr2 = PRR/PR*rho2/rho4 Pr2 is not - or nan, pass Pr2^-1. tempStateRight = (!pRR || !pR || (pRR < 0 != pR <0)) ? state[tr+1] : limitor(state[tr+1], state[tr], (state[tr+2].x*pR/(state[tr].x*pRR))); flux -= eulerFlux(tempStateLeft,tempStateRight); flux -= eulerSpectral(tempStateLeft,tempStateRight); //Add the change back to the node in question. #ifdef __CUDA_ARCH__ return state[tr] + (QUARTER * dimens.dt_dx * flux); #else return state[tr] + (QUARTER * dimz.dt_dx * flux); #endif } __device__ __host__ REALthree eulerFinalStep(REALthree *state, int tr, char flagLeft, char flagRight) { REAL pLL = (flagLeft) ? ZERO : (TWO * state[tr-1].x * state[tr-2].x * (state[tr-1].z - state[tr-2].z) + (state[tr-2].y * state[tr-2].y* state[tr-1].x - state[tr-1].y * state[tr-1].y * state[tr-2].x)) ; REAL pL = (TWO * state[tr].x *state[tr-1].x * (state[tr].z - state[tr-1].z) + (state[tr-1].y * state[tr-1].y * state[tr].x - state[tr].y * state[tr].y * state[tr-1].x)); REAL pR = (TWO * state[tr].x * state[tr+1].x * (state[tr+1].z - state[tr].z) + (state[tr].y * state[tr].y * state[tr+1].x - state[tr+1].y * state[tr+1].y * state[tr].x)); REAL pRR = (flagRight) ? ZERO : (TWO * state[tr+1].x * state[tr+2].x * (state[tr+2].z - state[tr+1].z) + (state[tr+1].y * state[tr+1].y * state[tr+2].x - state[tr+2].y * state[tr+2].y * state[tr+1].x)); REALthree tempStateLeft = (!pLL || !pL || (pLL < 0 != pL <0)) ? state[tr-1] : limitor(state[tr-1], state[tr], (state[tr-2].x*pL/(state[tr].x*pLL))); REALthree tempStateRight = (!pL || !pR || (pL < 0 != pR <0)) ? state[tr] : limitor(state[tr], state[tr-1], (state[tr+1].x*pL/(state[tr-1].x*pR))); REALthree flux = eulerFlux(tempStateLeft,tempStateRight); flux += eulerSpectral(tempStateLeft,tempStateRight); tempStateLeft = (!pL || !pR || (pL < 0 != pR <0)) ? 
state[tr] : limitor(state[tr], state[tr+1], (state[tr-1].x*pR/(state[tr+1].x*pL))); tempStateRight = (!pRR || !pR || (pRR < 0 != pR <0)) ? state[tr+1] : limitor(state[tr+1], state[tr], (state[tr+2].x*pR/(state[tr].x*pRR))); flux -= eulerFlux(tempStateLeft,tempStateRight); flux -= eulerSpectral(tempStateLeft,tempStateRight); // Return only the RHS of the discretization. #ifdef __CUDA_ARCH__ return (HALF * dimens.dt_dx * flux); #else return (HALF * dimz.dt_dx * flux); #endif } __global__ void classicEuler(REALthree *euler_in, REALthree *euler_out, const bool finalstep) { int gid = blockDim.x * blockIdx.x + threadIdx.x; //Global Thread ID const char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; if (truth.x) { euler_out[gid] = dbd[0]; return; } else if (truth.w) { euler_out[gid] = dbd[1]; return; } if (finalstep) { euler_out[gid] += eulerFinalStep(euler_in, gid, truth.y, truth.z); } else { euler_out[gid] = eulerStutterStep(euler_in, gid, truth.y, truth.z); } } __global__ void upTriangle(const REALthree *IC, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; //Global Thread ID int tididx = threadIdx.x + 2; //Thread's lane in the node [2, blockDim.x+1] int tidxTop = tididx + dimens.base; //Thread's second row lane. int k=4; //Start k at 4 since the base and second step occur before loop. //Assign the initial values to the first row in temper, each block //has it's own version of temper shared among its threads. temper[tididx] = IC[gid]; __syncthreads(); //First step gets predictor values for lanes excluding outer two on each side. if (threadIdx.x > 1 && threadIdx.x <(blockDim.x-2)) { temper[tidxTop] = eulerStutterStep(temper, tididx, false, false); } __syncthreads(); //Step through solution excluding two more lanes on each side each step. 
while (k < (blockDim.x>>1)) { if (threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, false, false); } k+=2; __syncthreads(); if (threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, false, false); } k+=2; __syncthreads(); } // Passes right and keeps left writeOutRight(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } __global__ void downTriangle(REALthree *IC, const REALthree *inRight, const REALthree *inLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; //k starts at blockDim.x/2 and shrinks from there as the lane width grows. int k = dimens.hts[2]; //Masks edges (whole domain edges not nodal edges) on last timestep. //Stored in one register per thread. const char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; readIn(temper, inRight, inLeft, threadIdx.x, gid); __syncthreads(); while(k>1) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k-=2; __syncthreads(); if (!truth.x && !truth.w && tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k-=2; __syncthreads(); } IC[gid] = temper[tididx]; } __global__ void wholeDiamond(const REALthree *inRight, const REALthree *inLeft, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; //Masks edges in the same way as downTriangle char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; readIn(temper, inRight, inLeft, threadIdx.x, gid); __syncthreads(); //k starts behind the downTriangle k because we need to do the first timestep outside the loop //to get the order right. 
int k = dimens.hts[0]; if (tididx < (dimens.base-dimens.hts[2]) && tididx >= dimens.hts[2]) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } __syncthreads(); while(k>4) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k -= 2; __syncthreads(); if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k -= 2; __syncthreads(); } // -------------------TOP PART------------------------------------------ if (!truth.w && !truth.x) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } __syncthreads(); if (tididx > 3 && tididx <(dimens.base-4)) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k=6; __syncthreads(); while(k<dimens.hts[4]) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k+=2; __syncthreads(); if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k+=2; __syncthreads(); } writeOutRight(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } __global__ void splitDiamond(REALthree *inRight, REALthree *inLeft, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; int k = dimens.hts[2]; //Starts more like downTriangle readIn(temper, inRight, inLeft, threadIdx.x, gid); //The edges are now in the center of the node 0 which is easy to find using the global id. const char4 truth = {gid == dimens.hts[0], gid == dimens.hts[1], gid == dimens.hts[2], gid == dimens.hts[3]}; __syncthreads(); //Still need to set the boundary values first because they aren't necessarily preserved in the global arrays. 
if (truth.z) { temper[tididx] = dbd[0]; temper[tidxTop] = dbd[0]; } if (truth.y) { temper[tididx] = dbd[1]; temper[tidxTop] = dbd[1]; } __syncthreads(); while(k>0) { if (!truth.y && !truth.z && tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } k -= 2; __syncthreads(); if (!truth.y && !truth.z && tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.w, truth.x); } k -= 2; __syncthreads(); } if (!truth.y && !truth.z && threadIdx.x > 1 && threadIdx.x <(blockDim.x-2)) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } __syncthreads(); k=4; while(k<dimens.hts[2]) { if (!truth.y && !truth.z && threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.w, truth.x); } k+=2; __syncthreads(); if (!truth.y && !truth.z && threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } k+=2; __syncthreads(); } writeOutLeft(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } //Now we can set the namespace. using namespace std; //Get energy out from conserved variables for plotting. __host__ REAL energy(REALthree subj) { REAL u = subj.y/subj.x; return subj.z/subj.x - HALF*u*u; } //Parameters are straighforward and taken directly from inputs to program. Wrapper that clls the classic procedure. double classicWrapper(const int bks, int tpb, const int dv, const double dt, const double t_end, REALthree *IC, REALthree *T_f, const double freq, ofstream &fwr) { REALthree *dEuler_in, *dEuler_out; //Allocate device arrays. hipMalloc((void **)&dEuler_in, sizeof(REALthree)*dv); hipMalloc((void **)&dEuler_out, sizeof(REALthree)*dv); // Copy the initial conditions to the device array. 
hipMemcpy(dEuler_in,IC,sizeof(REALthree)*dv,hipMemcpyHostToDevice); //Print to make sure we're here cout << "Classic scheme" << endl; //Start the timer (simulation timer that is.) double t_eq = 0.0; double twrite = freq - QUARTER*dt; //Call the kernel to step forward alternating global arrays with each call. while (t_eq < t_end) { hipLaunchKernelGGL(( classicEuler) , dim3(bks),dim3(tpb) , 0, 0, dEuler_in, dEuler_out, false); hipLaunchKernelGGL(( classicEuler) , dim3(bks),dim3(tpb) , 0, 0, dEuler_out, dEuler_in, true); t_eq += dt; } hipMemcpy(T_f, dEuler_in, sizeof(REALthree)*dv, hipMemcpyDeviceToHost); hipFree(dEuler_in); hipFree(dEuler_out); return t_eq; } //The wrapper that enacts the swept rule. double sweptWrapper(const int bks, int tpb, const int dv, const double dt, const double t_end, REALthree *IC, REALthree *T_f, const double freq, ofstream &fwr) { const size_t smem = (2*dimz.base)*sizeof(REALthree); //Amt of shared memory to request REALthree *d_IC, *d0_right, *d0_left, *d2_right, *d2_left; // Allocate device global memory hipMalloc((void **)&d_IC, sizeof(REALthree)*dv); hipMalloc((void **)&d0_right, sizeof(REALthree)*dv); hipMalloc((void **)&d0_left, sizeof(REALthree)*dv); hipMalloc((void **)&d2_right, sizeof(REALthree)*dv); hipMalloc((void **)&d2_left, sizeof(REALthree)*dv); // Transfer over the initial conditions. hipMemcpy(d_IC,IC,sizeof(REALthree)*dv,hipMemcpyHostToDevice); // Start the simulation time counter and start the clock. 
const double t_fullstep = 0.25*dt*(double)tpb; //Call up first out of loop with right and left 0 hipLaunchKernelGGL(( upTriangle) , dim3(bks), dim3(tpb), smem, 0, d_IC, d0_right, d0_left); // Call the kernels until you reach the final time hipLaunchKernelGGL(( splitDiamond) , dim3(bks), dim3(tpb), smem, 0, d0_right, d0_left, d2_right, d2_left); double t_eq = t_fullstep; while(t_eq < t_end) { hipLaunchKernelGGL(( wholeDiamond) , dim3(bks), dim3(tpb), smem, 0, d2_right, d2_left, d0_right, d0_left); hipLaunchKernelGGL(( splitDiamond) , dim3(bks), dim3(tpb), smem, 0, d0_right, d0_left, d2_right, d2_left); //It always ends on a left pass since the down triangle is a right pass. t_eq += t_fullstep; } // The last call is down so call it and pass the relevant data to the host with memcpy. hipLaunchKernelGGL(( downTriangle) , dim3(bks), dim3(tpb), smem, 0, d_IC, d2_right, d2_left); hipMemcpy(T_f, d_IC, sizeof(REALthree)*dv, hipMemcpyDeviceToHost); hipFree(d_IC); hipFree(d0_right); hipFree(d0_left); hipFree(d2_right); hipFree(d2_left); return t_eq; } int main( int argc, char *argv[] ) { cout.precision(10); // Choose the GPGPU. This is device 0 in my machine which has 2 devices. hipSetDevice(GPUNUM); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); ofstream fwr; dimz.gam = 1.4; dimz.mgam = 0.4; bd[0].x = ONE; //Density bd[1].x = 0.125; bd[0].y = ZERO; //Velocity bd[1].y = ZERO; bd[0].z = ONE/dimz.mgam; //Energy bd[1].z = 0.1/dimz.mgam; const int dv = atoi(argv[1]); //Number of spatial points const int tpb = atoi(argv[2]); //Threads per Block const double dt = atof(argv[3]); //Timestep const double tf = atof(argv[4]) - QUARTER*dt; //Finish time const double freq = atof(argv[5]); //Frequency of output (i.e. every 20 s (simulation time)) const int scheme = atoi(argv[6]); //2 for Alternate, 1 for GPUShared, 0 for Classic const int bks = dv/tpb; //The number of blocks const double dx = lx/((REAL)dv-TWO); //Grid size. 
if (scheme) fwr.open("eulerResult/atomicS.dat", ios::trunc); else fwr.open("eulerResult/atomicC.dat", ios::trunc); fwr.precision(10); //Declare the dimensions in constant memory. dimz.dt_dx = dt/dx; // dt/dx dimz.base = tpb+4; // Length of the base of a node. dimz.idxend = dv-1; // Index of last spatial point. dimz.idxend_1 = dv-2; // 2nd to last spatial point. for (int k=-2; k<3; k++) dimz.hts[k+2] = (tpb/2) + k; //Middle values in the node (masking values) cout << "Euler Array --- #Blocks: " << bks << " | dt/dx: " << dimz.dt_dx << endl; // Initialize arrays. REALthree *IC, *T_final; hipHostMalloc((void **) &IC, dv*sizeof(REALthree), hipHostMallocDefault); // Initial conditions hipHostMalloc((void **) &T_final, dv*sizeof(REALthree), hipHostMallocDefault); // Final values for (int k = 0; k<dv; k++) IC[k] = (k<dv/2) ? bd[0] : bd[1]; // Populate initial conditions // Write out x length and then delta x and then delta t. // First item of each line is variable second is timestamp. #ifndef NOWRITE fwr << lx << " " << (dv-2) << " " << dx << " " << endl; fwr << "Density " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].x << " "; fwr << endl; fwr << "Velocity " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].y << " "; fwr << endl; fwr << "Energy " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].z/IC[k].x << " "; fwr << endl; fwr << "Pressure " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << pressure(IC[k]) << " "; fwr << endl; #endif // Transfer data to GPU in constant memory. hipMemcpyToSymbol(dimens,&dimz,sizeof(dimensions)); hipMemcpyToSymbol(dbd,&bd,2*sizeof(REALthree)); // Start the counter and start the clock. hipEvent_t start, stop; float timed; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start, 0); string tpath; // Call the correct function with the correct algorithm. 
cout << scheme << " " ; double tfm; if (scheme) { tpath = "eulerResult/Array_Swept.csv"; tfm = sweptWrapper(bks, tpb, dv, dt, tf, IC, T_final, freq, fwr); } else { tpath = "eulerResult/Array_Classic.csv"; tfm = classicWrapper(bks, tpb, dv, dt, tf, IC, T_final, freq, fwr); } // Show the time and write out the final condition. hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime( &timed, start, stop); hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); exit(-1); } timed *= 1.e3; double n_timesteps = tfm/dt; double per_ts = timed/n_timesteps; cout << n_timesteps << " timesteps" << endl; cout << "Averaged " << per_ts << " microseconds (us) per timestep" << endl; FILE *timeOut; timeOut = fopen(tpath.c_str(), "a+"); fseek(timeOut, 0, SEEK_END); int ft = ftell(timeOut); if (!ft) fprintf(timeOut, "tpb,nX,time\n"); fprintf(timeOut, "%d,%d,%.8f\n", tpb, dv, per_ts); fclose(timeOut); #ifndef NOWRITE fwr << "Density " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << T_final[k].x << " "; fwr << endl; fwr << "Velocity " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << T_final[k].y/T_final[k].x << " "; fwr << endl; fwr << "Energy " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << energy(T_final[k]) << " "; fwr << endl; fwr << "Pressure " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << pressure(T_final[k]) << " "; fwr << endl; #endif fwr.close(); hipDeviceSynchronize(); hipEventDestroy( start ); hipEventDestroy( stop ); hipDeviceReset(); hipHostFree(IC); hipHostFree(T_final); return 0; }
601d67d540b30cdfc95ddc18e03210b61caea17b.cu
/** Flattening strategy for Euler - Atomic stages. COMPILE: nvcc EulerArray.cu -o ./bin/EulerArray -gencode arch=compute_35,code=sm_35 -lm -restrict -Xptxas=-v */ #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <device_functions.h> #include "myVectorTypes.h" #include <ostream> #include <iostream> #include <cstdio> #include <cstdlib> #include <cmath> #include <fstream> #include <omp.h> // This file uses vector types to hold the dependent variables so fundamental operations on those types are defined as macros to accommodate different data types. Also, keeping types consistent for common constants (0, 1, 2, etc) used in computation has an appreciable positive effect on performance. #ifndef GPUNUM #define GPUNUM 0 #endif // We're just going to assume doubles #define REAL double #define REALtwo double2 #define REALthree double3 #define ZERO 0.0 #define QUARTER 0.25 #define HALF 0.5 #define ONE 1.0 #define TWO 2.0 #define SQUAREROOT(x) sqrt(x) // Hardwire in the length of the const REAL lx = 1.0; // The structure to carry the initial and boundary conditions. // 0 is left 1 is right. REALthree bd[2]; //dbd is the boundary condition in device constant memory. __constant__ REALthree dbd[2]; //Protoype for useful information struct. struct dimensions { REAL gam; // Heat capacity ratio REAL mgam; // 1- Heat capacity ratio REAL dt_dx; // deltat/deltax int base; // Length of node + stencils at end (4) int idxend; // Last index (number of spatial points - 1) int idxend_1; // Num spatial points - 2 int hts[5]; // The five point stencil around base/2 }; // structure of dimensions in cpu memory dimensions dimz; // Useful and efficient to keep the important constant information in GPU constant memory. 
__constant__ dimensions dimens; __host__ __device__ __forceinline__ void readIn(REALthree *temp, const REALthree *rights, const REALthree *lefts, int td, int gd) { // The index in the SHARED memory working array to place the corresponding member of right or left. #ifdef __CUDA_ARCH__ // Accesses the correct structure in constant memory. int leftidx = dimens.hts[4] + (((td>>2) & 1) * dimens.base) + (td & 3) - (4 + ((td>>2)<<1)); int rightidx = dimens.hts[4] + (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3); #else int leftidx = dimz.hts[4] + (((td>>2) & 1) * dimz.base) + (td & 3) - (4 + ((td>>2)<<1)); int rightidx = dimz.hts[4] + (((td>>2) & 1) * dimz.base) + ((td>>2)<<1) + (td & 3); #endif temp[leftidx] = rights[gd]; temp[rightidx] = lefts[gd]; } __device__ __forceinline__ void writeOutRight(REALthree *temp, REALthree *rights, REALthree *lefts, int td, int gd, int bd) { int gdskew = (gd + bd) & dimens.idxend; //The offset for the right array. int leftidx = (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimens.base-6) + (((td>>2) & 1) * dimens.base) + (td & 3) - ((td>>2)<<1); rights[gdskew] = temp[rightidx]; lefts[gd] = temp[leftidx]; } __host__ __device__ __forceinline__ void writeOutLeft(REALthree *temp, REALthree *rights, REALthree *lefts, int td, int gd, int bd) { #ifdef __CUDA_ARCH__ int gdskew = (gd - bd) & dimens.idxend; //The offset for the right array. 
int leftidx = (((td>>2) & 1) * dimens.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimens.base-6) + (((td>>2) & 1) * dimens.base) + (td & 3) - ((td>>2)<<1); #else int gdskew = gd; int leftidx = (((td>>2) & 1) * dimz.base) + ((td>>2)<<1) + (td & 3) + 2; int rightidx = (dimz.base-6) + (((td>>2) & 1) * dimz.base) + (td & 3) - ((td>>2)<<1); #endif rights[gd] = temp[rightidx]; lefts[gdskew] = temp[leftidx]; } __device__ __host__ __forceinline__ REAL pressure(REALthree current) { #ifdef __CUDA_ARCH__ return dimens.mgam * (current.z - (HALF * current.y * current.y/current.x)); #else return dimz.mgam * (current.z - (HALF * current.y * current.y/current.x)); #endif } __device__ __host__ __forceinline__ REAL pressureHalf(REALthree current) { #ifdef __CUDA_ARCH__ return dimens.mgam * (current.z - HALF * current.y * current.y); #else return dimz.mgam * (current.z - HALF * current.y * current.y); #endif } __device__ __host__ __forceinline__ REALthree limitor(REALthree cvCurrent, REALthree cvOther, REAL pRatio) { return (cvCurrent + HALF * min(pRatio,ONE) * (cvOther - cvCurrent)); } __device__ __host__ __forceinline__ REALthree eulerFlux(REALthree cvLeft, REALthree cvRight) { #ifndef __CUDA_ARCH__ using namespace std; #endif REAL uLeft = cvLeft.y/cvLeft.x; REAL uRight = cvRight.y/cvRight.x; REAL pL = pressure(cvLeft); REAL pR = pressure(cvRight); REALthree flux; flux.x = (cvLeft.y + cvRight.y); flux.y = (cvLeft.y*uLeft + cvRight.y*uRight + pL + pR); flux.z = (cvLeft.z*uLeft + cvRight.z*uRight + uLeft*pL + uRight*pR); return flux; } __device__ __host__ __forceinline__ REALthree eulerSpectral(REALthree cvLeft, REALthree cvRight) { #ifndef __CUDA_ARCH__ using namespace std; #endif REALthree halfState; REAL rhoLeftsqrt = SQUAREROOT(cvLeft.x); REAL rhoRightsqrt = SQUAREROOT(cvRight.x); halfState.x = rhoLeftsqrt * rhoRightsqrt; REAL halfDenom = ONE/(halfState.x*(rhoLeftsqrt + rhoRightsqrt)); halfState.y = (rhoLeftsqrt*cvRight.y + rhoRightsqrt*cvLeft.y)*halfDenom; halfState.z = 
(rhoLeftsqrt*cvRight.z + rhoRightsqrt*cvLeft.z)*halfDenom; REAL pH = pressureHalf(halfState); #ifdef __CUDA_ARCH__ return (SQUAREROOT(pH*dimens.gam) + fabs(halfState.y)) * (cvLeft - cvRight); #else return (SQUAREROOT(pH*dimz.gam) + fabs(halfState.y)) * (cvLeft - cvRight); #endif } __device__ __host__ REALthree eulerStutterStep(REALthree *state, int tr, char flagLeft, char flagRight) { //P1-P0 REAL pLL = (flagLeft) ? ZERO : (TWO * state[tr-1].x * state[tr-2].x * (state[tr-1].z - state[tr-2].z) + (state[tr-2].y * state[tr-2].y* state[tr-1].x - state[tr-1].y * state[tr-1].y * state[tr-2].x)) ; //P2-P1 REAL pL = (TWO * state[tr].x *state[tr-1].x * (state[tr].z - state[tr-1].z) + (state[tr-1].y * state[tr-1].y * state[tr].x - state[tr].y * state[tr].y * state[tr-1].x)); //P3-P2 REAL pR = (TWO * state[tr].x * state[tr+1].x * (state[tr+1].z - state[tr].z) + (state[tr].y * state[tr].y * state[tr+1].x - state[tr+1].y * state[tr+1].y * state[tr].x)); //P4-P3 REAL pRR = (flagRight) ? ZERO : (TWO * state[tr+1].x * state[tr+2].x * (state[tr+2].z - state[tr+1].z) + (state[tr+1].y * state[tr+1].y * state[tr+2].x - state[tr+2].y * state[tr+2].y * state[tr+1].x)); //This is the temporary state bounded by the limitor function. //Pr0 = PL/PLL*rho0/rho2 Pr0 is not -, 0, or nan. REALthree tempStateLeft = (!pLL || !pL || (pLL < 0 != pL <0)) ? state[tr-1] : limitor(state[tr-1], state[tr], (state[tr-2].x*pL/(state[tr].x*pLL))); //Pr1 = PR/PL*rho1/rho3 Pr1 is not - or nan, pass Pr1^-1. REALthree tempStateRight = (!pL || !pR || (pL < 0 != pR <0)) ? state[tr] : limitor(state[tr], state[tr-1], (state[tr+1].x*pL/(state[tr-1].x*pR))); //Pressure needs to be recalculated for the new limited state variables. REALthree flux = eulerFlux(tempStateLeft,tempStateRight); flux += eulerSpectral(tempStateLeft,tempStateRight); //Do the same thing with the right side. //Pr1 = PR/PL*rho1/rho3 Pr1 is not - or nan. tempStateLeft = (!pL || !pR || (pL < 0 != pR <0)) ? 
state[tr] : limitor(state[tr], state[tr+1], (state[tr-1].x*pR/(state[tr+1].x*pL))); //Pr2 = PRR/PR*rho2/rho4 Pr2 is not - or nan, pass Pr2^-1. tempStateRight = (!pRR || !pR || (pRR < 0 != pR <0)) ? state[tr+1] : limitor(state[tr+1], state[tr], (state[tr+2].x*pR/(state[tr].x*pRR))); flux -= eulerFlux(tempStateLeft,tempStateRight); flux -= eulerSpectral(tempStateLeft,tempStateRight); //Add the change back to the node in question. #ifdef __CUDA_ARCH__ return state[tr] + (QUARTER * dimens.dt_dx * flux); #else return state[tr] + (QUARTER * dimz.dt_dx * flux); #endif } __device__ __host__ REALthree eulerFinalStep(REALthree *state, int tr, char flagLeft, char flagRight) { REAL pLL = (flagLeft) ? ZERO : (TWO * state[tr-1].x * state[tr-2].x * (state[tr-1].z - state[tr-2].z) + (state[tr-2].y * state[tr-2].y* state[tr-1].x - state[tr-1].y * state[tr-1].y * state[tr-2].x)) ; REAL pL = (TWO * state[tr].x *state[tr-1].x * (state[tr].z - state[tr-1].z) + (state[tr-1].y * state[tr-1].y * state[tr].x - state[tr].y * state[tr].y * state[tr-1].x)); REAL pR = (TWO * state[tr].x * state[tr+1].x * (state[tr+1].z - state[tr].z) + (state[tr].y * state[tr].y * state[tr+1].x - state[tr+1].y * state[tr+1].y * state[tr].x)); REAL pRR = (flagRight) ? ZERO : (TWO * state[tr+1].x * state[tr+2].x * (state[tr+2].z - state[tr+1].z) + (state[tr+1].y * state[tr+1].y * state[tr+2].x - state[tr+2].y * state[tr+2].y * state[tr+1].x)); REALthree tempStateLeft = (!pLL || !pL || (pLL < 0 != pL <0)) ? state[tr-1] : limitor(state[tr-1], state[tr], (state[tr-2].x*pL/(state[tr].x*pLL))); REALthree tempStateRight = (!pL || !pR || (pL < 0 != pR <0)) ? state[tr] : limitor(state[tr], state[tr-1], (state[tr+1].x*pL/(state[tr-1].x*pR))); REALthree flux = eulerFlux(tempStateLeft,tempStateRight); flux += eulerSpectral(tempStateLeft,tempStateRight); tempStateLeft = (!pL || !pR || (pL < 0 != pR <0)) ? 
state[tr] : limitor(state[tr], state[tr+1], (state[tr-1].x*pR/(state[tr+1].x*pL))); tempStateRight = (!pRR || !pR || (pRR < 0 != pR <0)) ? state[tr+1] : limitor(state[tr+1], state[tr], (state[tr+2].x*pR/(state[tr].x*pRR))); flux -= eulerFlux(tempStateLeft,tempStateRight); flux -= eulerSpectral(tempStateLeft,tempStateRight); // Return only the RHS of the discretization. #ifdef __CUDA_ARCH__ return (HALF * dimens.dt_dx * flux); #else return (HALF * dimz.dt_dx * flux); #endif } __global__ void classicEuler(REALthree *euler_in, REALthree *euler_out, const bool finalstep) { int gid = blockDim.x * blockIdx.x + threadIdx.x; //Global Thread ID const char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; if (truth.x) { euler_out[gid] = dbd[0]; return; } else if (truth.w) { euler_out[gid] = dbd[1]; return; } if (finalstep) { euler_out[gid] += eulerFinalStep(euler_in, gid, truth.y, truth.z); } else { euler_out[gid] = eulerStutterStep(euler_in, gid, truth.y, truth.z); } } __global__ void upTriangle(const REALthree *IC, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; //Global Thread ID int tididx = threadIdx.x + 2; //Thread's lane in the node [2, blockDim.x+1] int tidxTop = tididx + dimens.base; //Thread's second row lane. int k=4; //Start k at 4 since the base and second step occur before loop. //Assign the initial values to the first row in temper, each block //has it's own version of temper shared among its threads. temper[tididx] = IC[gid]; __syncthreads(); //First step gets predictor values for lanes excluding outer two on each side. if (threadIdx.x > 1 && threadIdx.x <(blockDim.x-2)) { temper[tidxTop] = eulerStutterStep(temper, tididx, false, false); } __syncthreads(); //Step through solution excluding two more lanes on each side each step. 
while (k < (blockDim.x>>1)) { if (threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, false, false); } k+=2; __syncthreads(); if (threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, false, false); } k+=2; __syncthreads(); } // Passes right and keeps left writeOutRight(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } __global__ void downTriangle(REALthree *IC, const REALthree *inRight, const REALthree *inLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; //k starts at blockDim.x/2 and shrinks from there as the lane width grows. int k = dimens.hts[2]; //Masks edges (whole domain edges not nodal edges) on last timestep. //Stored in one register per thread. const char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; readIn(temper, inRight, inLeft, threadIdx.x, gid); __syncthreads(); while(k>1) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k-=2; __syncthreads(); if (!truth.x && !truth.w && tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k-=2; __syncthreads(); } IC[gid] = temper[tididx]; } __global__ void wholeDiamond(const REALthree *inRight, const REALthree *inLeft, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; //Masks edges in the same way as downTriangle char4 truth = {gid == 0, gid == 1, gid == dimens.idxend_1, gid == dimens.idxend}; readIn(temper, inRight, inLeft, threadIdx.x, gid); __syncthreads(); //k starts behind the downTriangle k because we need to do the first timestep outside the loop //to get the order right. 
int k = dimens.hts[0]; if (tididx < (dimens.base-dimens.hts[2]) && tididx >= dimens.hts[2]) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } __syncthreads(); while(k>4) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k -= 2; __syncthreads(); if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k -= 2; __syncthreads(); } // -------------------TOP PART------------------------------------------ if (!truth.w && !truth.x) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } __syncthreads(); if (tididx > 3 && tididx <(dimens.base-4)) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k=6; __syncthreads(); while(k<dimens.hts[4]) { if (tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.y, truth.z); } k+=2; __syncthreads(); if (tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.y, truth.z); } k+=2; __syncthreads(); } writeOutRight(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } __global__ void splitDiamond(REALthree *inRight, REALthree *inLeft, REALthree *outRight, REALthree *outLeft) { extern __shared__ REALthree temper[]; int gid = blockDim.x * blockIdx.x + threadIdx.x; int tididx = threadIdx.x + 2; int tidxTop = tididx + dimens.base; int k = dimens.hts[2]; //Starts more like downTriangle readIn(temper, inRight, inLeft, threadIdx.x, gid); //The edges are now in the center of the node 0 which is easy to find using the global id. const char4 truth = {gid == dimens.hts[0], gid == dimens.hts[1], gid == dimens.hts[2], gid == dimens.hts[3]}; __syncthreads(); //Still need to set the boundary values first because they aren't necessarily preserved in the global arrays. 
if (truth.z) { temper[tididx] = dbd[0]; temper[tidxTop] = dbd[0]; } if (truth.y) { temper[tididx] = dbd[1]; temper[tidxTop] = dbd[1]; } __syncthreads(); while(k>0) { if (!truth.y && !truth.z && tididx < (dimens.base-k) && tididx >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } k -= 2; __syncthreads(); if (!truth.y && !truth.z && tididx < (dimens.base-k) && tididx >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.w, truth.x); } k -= 2; __syncthreads(); } if (!truth.y && !truth.z && threadIdx.x > 1 && threadIdx.x <(blockDim.x-2)) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } __syncthreads(); k=4; while(k<dimens.hts[2]) { if (!truth.y && !truth.z && threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tididx] += eulerFinalStep(temper, tidxTop, truth.w, truth.x); } k+=2; __syncthreads(); if (!truth.y && !truth.z && threadIdx.x < (blockDim.x-k) && threadIdx.x >= k) { temper[tidxTop] = eulerStutterStep(temper, tididx, truth.w, truth.x); } k+=2; __syncthreads(); } writeOutLeft(temper, outRight, outLeft, threadIdx.x, gid, blockDim.x); } //Now we can set the namespace. using namespace std; //Get energy out from conserved variables for plotting. __host__ REAL energy(REALthree subj) { REAL u = subj.y/subj.x; return subj.z/subj.x - HALF*u*u; } //Parameters are straighforward and taken directly from inputs to program. Wrapper that clls the classic procedure. double classicWrapper(const int bks, int tpb, const int dv, const double dt, const double t_end, REALthree *IC, REALthree *T_f, const double freq, ofstream &fwr) { REALthree *dEuler_in, *dEuler_out; //Allocate device arrays. cudaMalloc((void **)&dEuler_in, sizeof(REALthree)*dv); cudaMalloc((void **)&dEuler_out, sizeof(REALthree)*dv); // Copy the initial conditions to the device array. 
cudaMemcpy(dEuler_in,IC,sizeof(REALthree)*dv,cudaMemcpyHostToDevice); //Print to make sure we're here cout << "Classic scheme" << endl; //Start the timer (simulation timer that is.) double t_eq = 0.0; double twrite = freq - QUARTER*dt; //Call the kernel to step forward alternating global arrays with each call. while (t_eq < t_end) { classicEuler <<< bks,tpb >>> (dEuler_in, dEuler_out, false); classicEuler <<< bks,tpb >>> (dEuler_out, dEuler_in, true); t_eq += dt; } cudaMemcpy(T_f, dEuler_in, sizeof(REALthree)*dv, cudaMemcpyDeviceToHost); cudaFree(dEuler_in); cudaFree(dEuler_out); return t_eq; } //The wrapper that enacts the swept rule. double sweptWrapper(const int bks, int tpb, const int dv, const double dt, const double t_end, REALthree *IC, REALthree *T_f, const double freq, ofstream &fwr) { const size_t smem = (2*dimz.base)*sizeof(REALthree); //Amt of shared memory to request REALthree *d_IC, *d0_right, *d0_left, *d2_right, *d2_left; // Allocate device global memory cudaMalloc((void **)&d_IC, sizeof(REALthree)*dv); cudaMalloc((void **)&d0_right, sizeof(REALthree)*dv); cudaMalloc((void **)&d0_left, sizeof(REALthree)*dv); cudaMalloc((void **)&d2_right, sizeof(REALthree)*dv); cudaMalloc((void **)&d2_left, sizeof(REALthree)*dv); // Transfer over the initial conditions. cudaMemcpy(d_IC,IC,sizeof(REALthree)*dv,cudaMemcpyHostToDevice); // Start the simulation time counter and start the clock. const double t_fullstep = 0.25*dt*(double)tpb; //Call up first out of loop with right and left 0 upTriangle <<<bks, tpb, smem>>> (d_IC, d0_right, d0_left); // Call the kernels until you reach the final time splitDiamond <<<bks, tpb, smem>>> (d0_right, d0_left, d2_right, d2_left); double t_eq = t_fullstep; while(t_eq < t_end) { wholeDiamond <<<bks, tpb, smem>>> (d2_right, d2_left, d0_right, d0_left); splitDiamond <<<bks, tpb, smem>>> (d0_right, d0_left, d2_right, d2_left); //It always ends on a left pass since the down triangle is a right pass. 
t_eq += t_fullstep; } // The last call is down so call it and pass the relevant data to the host with memcpy. downTriangle <<<bks, tpb, smem>>> (d_IC, d2_right, d2_left); cudaMemcpy(T_f, d_IC, sizeof(REALthree)*dv, cudaMemcpyDeviceToHost); cudaFree(d_IC); cudaFree(d0_right); cudaFree(d0_left); cudaFree(d2_right); cudaFree(d2_left); return t_eq; } int main( int argc, char *argv[] ) { cout.precision(10); // Choose the GPGPU. This is device 0 in my machine which has 2 devices. cudaSetDevice(GPUNUM); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); ofstream fwr; dimz.gam = 1.4; dimz.mgam = 0.4; bd[0].x = ONE; //Density bd[1].x = 0.125; bd[0].y = ZERO; //Velocity bd[1].y = ZERO; bd[0].z = ONE/dimz.mgam; //Energy bd[1].z = 0.1/dimz.mgam; const int dv = atoi(argv[1]); //Number of spatial points const int tpb = atoi(argv[2]); //Threads per Block const double dt = atof(argv[3]); //Timestep const double tf = atof(argv[4]) - QUARTER*dt; //Finish time const double freq = atof(argv[5]); //Frequency of output (i.e. every 20 s (simulation time)) const int scheme = atoi(argv[6]); //2 for Alternate, 1 for GPUShared, 0 for Classic const int bks = dv/tpb; //The number of blocks const double dx = lx/((REAL)dv-TWO); //Grid size. if (scheme) fwr.open("eulerResult/atomicS.dat", ios::trunc); else fwr.open("eulerResult/atomicC.dat", ios::trunc); fwr.precision(10); //Declare the dimensions in constant memory. dimz.dt_dx = dt/dx; // dt/dx dimz.base = tpb+4; // Length of the base of a node. dimz.idxend = dv-1; // Index of last spatial point. dimz.idxend_1 = dv-2; // 2nd to last spatial point. for (int k=-2; k<3; k++) dimz.hts[k+2] = (tpb/2) + k; //Middle values in the node (masking values) cout << "Euler Array --- #Blocks: " << bks << " | dt/dx: " << dimz.dt_dx << endl; // Initialize arrays. 
REALthree *IC, *T_final; cudaHostAlloc((void **) &IC, dv*sizeof(REALthree), cudaHostAllocDefault); // Initial conditions cudaHostAlloc((void **) &T_final, dv*sizeof(REALthree), cudaHostAllocDefault); // Final values for (int k = 0; k<dv; k++) IC[k] = (k<dv/2) ? bd[0] : bd[1]; // Populate initial conditions // Write out x length and then delta x and then delta t. // First item of each line is variable second is timestamp. #ifndef NOWRITE fwr << lx << " " << (dv-2) << " " << dx << " " << endl; fwr << "Density " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].x << " "; fwr << endl; fwr << "Velocity " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].y << " "; fwr << endl; fwr << "Energy " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << IC[k].z/IC[k].x << " "; fwr << endl; fwr << "Pressure " << 0 << " "; for (int k = 1; k<(dv-1); k++) fwr << pressure(IC[k]) << " "; fwr << endl; #endif // Transfer data to GPU in constant memory. cudaMemcpyToSymbol(dimens,&dimz,sizeof(dimensions)); cudaMemcpyToSymbol(dbd,&bd,2*sizeof(REALthree)); // Start the counter and start the clock. cudaEvent_t start, stop; float timed; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start, 0); string tpath; // Call the correct function with the correct algorithm. cout << scheme << " " ; double tfm; if (scheme) { tpath = "eulerResult/Array_Swept.csv"; tfm = sweptWrapper(bks, tpb, dv, dt, tf, IC, T_final, freq, fwr); } else { tpath = "eulerResult/Array_Classic.csv"; tfm = classicWrapper(bks, tpb, dv, dt, tf, IC, T_final, freq, fwr); } // Show the time and write out the final condition. 
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime( &timed, start, stop); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } timed *= 1.e3; double n_timesteps = tfm/dt; double per_ts = timed/n_timesteps; cout << n_timesteps << " timesteps" << endl; cout << "Averaged " << per_ts << " microseconds (us) per timestep" << endl; FILE *timeOut; timeOut = fopen(tpath.c_str(), "a+"); fseek(timeOut, 0, SEEK_END); int ft = ftell(timeOut); if (!ft) fprintf(timeOut, "tpb,nX,time\n"); fprintf(timeOut, "%d,%d,%.8f\n", tpb, dv, per_ts); fclose(timeOut); #ifndef NOWRITE fwr << "Density " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << T_final[k].x << " "; fwr << endl; fwr << "Velocity " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << T_final[k].y/T_final[k].x << " "; fwr << endl; fwr << "Energy " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << energy(T_final[k]) << " "; fwr << endl; fwr << "Pressure " << tfm << " "; for (int k = 1; k<(dv-1); k++) fwr << pressure(T_final[k]) << " "; fwr << endl; #endif fwr.close(); cudaDeviceSynchronize(); cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaDeviceReset(); cudaFreeHost(IC); cudaFreeHost(T_final); return 0; }
e1af83ca9e24b04e69c7c72565d1a216c84a3ecb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define N 1200 #define THREADS 1024 #include <stdio.h> #include <math.h> __global__ void vecAdd(int *a, int *b, int *c); int main(){ int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; size = N*sizeof(int); hipMalloc((void**) &dev_a, size); hipMalloc((void**) &dev_b, size); hipMalloc((void**) &dev_c, size); a = (int *)malloc(size); b = (int *)malloc(size); c = (int *)malloc(size); for(int i = 0; i < N; i++){ a[i] = b[i] = i; c[i] = 0; } hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( vecAdd), dim3((int)ceil(THREADS/N)),dim3(N), 0, 0, dev_a, dev_b, dev_c); hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost); for(int i = 0; i < N; i++){ printf("c[%d] = %d\n", i, c[i]); } free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); exit(0); } __global__ void vecAdd(int *a, int *b, int *c){ int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < N){ c[i] = a[i] + b[i]; printf("Sou a thread %d em %d\n", threadIdx.x, i); } }
e1af83ca9e24b04e69c7c72565d1a216c84a3ecb.cu
#define N 1200 #define THREADS 1024 #include <stdio.h> #include <math.h> __global__ void vecAdd(int *a, int *b, int *c); int main(){ int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; size = N*sizeof(int); cudaMalloc((void**) &dev_a, size); cudaMalloc((void**) &dev_b, size); cudaMalloc((void**) &dev_c, size); a = (int *)malloc(size); b = (int *)malloc(size); c = (int *)malloc(size); for(int i = 0; i < N; i++){ a[i] = b[i] = i; c[i] = 0; } cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice); vecAdd<<<(int)ceil(THREADS/N),N>>>(dev_a, dev_b, dev_c); cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); for(int i = 0; i < N; i++){ printf("c[%d] = %d\n", i, c[i]); } free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); exit(0); } __global__ void vecAdd(int *a, int *b, int *c){ int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < N){ c[i] = a[i] + b[i]; printf("Sou a thread %d em %d\n", threadIdx.x, i); } }
588792af45340e00f9c621fbfdf392431a8b5983.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #define TAM 3 #define N 2 #define T 6 #define TPB 16 __global__ void sum_matrix(int** dd_mat_a,int** dd_mat_b,int** dd_mat_c, int n, int m){ int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; if( y<n && x<m ){ // revisar <<<<<<<<<<<<<<<<<<<<<<<<<<<< //*(*(dd_mat_a+y)+x)=-9; *(*(dd_mat_c+y)+x)= *(*(dd_mat_a+y)+x) + *(*(dd_mat_b+y)+x); } } void create3(int*** mat,int n, int m){ *mat = (int** )malloc(sizeof(int*)*n); (*mat)[0] = (int* )malloc(sizeof(int)*n*m); int i; for(i=1;i<n;i++){ (*mat)[i] = (*mat)[0]+i*m; } } void fill(int** mat, int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) mat[i][j] = rand()%2; } } void fill_value(int** mat,int n, int m, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<m;j++) mat[i][j] = value; } void print(int** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%d",mat[i][j]); printf("\n"); } } void create5(int**& mat, int**& d_mat, int**& dd_mat, int n, int m, int fillValue=-1){ int i; mat = (int** )malloc(sizeof(int*)*n); mat[0] = (int* )malloc(sizeof(int)*n*m); for(i=1;i<n;i++){ mat[i] = mat[i-1]+m; } if(fillValue==-1){ fill(mat,n,m); } else{ fill_value(mat,n,m,fillValue); } int size_row = sizeof(int*) * n; int size_col = sizeof(int ) * m; d_mat = (int**) malloc(size_row); hipMalloc((void**)& d_mat[0], sizeof(int) * m * n ); hipMemcpy(d_mat[0], mat[0], sizeof(int) * m * n ,hipMemcpyHostToDevice); for(i=1;i<n;i++){ d_mat[i]=(d_mat[i-1]+m); } hipMalloc((void***)&dd_mat,size_row); hipMemcpy(dd_mat,d_mat,size_row,hipMemcpyHostToDevice); } int main(){ if(N*T<TAM){ printf("no cubre la matriz\n"); return 0; } int n = TAM; int m = TAM; int** mat_a; int** d_mat_a; int** dd_mat_a; int** mat_b; int** d_mat_b; int** dd_mat_b; int** mat_c; int** d_mat_c; int** dd_mat_c; create5(mat_a,d_mat_a,dd_mat_a,n,m); create5(mat_b,d_mat_b,dd_mat_b,n,m); 
create5(mat_c,d_mat_c,dd_mat_c,n,m,0); int i; int size_row = sizeof(int*) * n; int size_col = sizeof(int ) * m; /* int** mat_a; create3(&mat_a,n,m); fill(mat_a,n,m); int **d_mat_a; int **dd_mat_a; d_mat_a = (int**) malloc(size_row); hipMalloc((void**)& d_mat_a[0], sizeof(int) * m * n ); hipMemcpy(d_mat_a[0], mat_a[0], sizeof(int) * m * n ,hipMemcpyHostToDevice); for(i=1;i<n;i++){ d_mat_a[i]=(d_mat_a[i-1]+m); } hipMalloc((void***)&dd_mat_a,size_row); hipMemcpy(dd_mat_a,d_mat_a,size_row,hipMemcpyHostToDevice); */ print(mat_a,n,m); printf("//////////////////\n"); print(mat_b,n,m); printf("//////////////////\n"); print(mat_c,n,m); printf("//////////////////\n"); printf("//////////////////\n"); dim3 blockNum(TPB,TPB,1); dim3 grid((blockNum.x-1+n)/blockNum.x,(blockNum.y-1+m)/blockNum.y,1); hipLaunchKernelGGL(( sum_matrix), dim3(grid),dim3(blockNum), 0, 0, dd_mat_a,dd_mat_b,dd_mat_c,n,m); for(i=0;i<n;i++){ hipMemcpy(mat_c[i],d_mat_c[i],size_col,hipMemcpyDeviceToHost); } printf("//////////////////\n"); printf("//////////////////\n"); //print(mat_a,n,m); printf("//////////////////\n"); //print(mat_b,n,m); printf("//////////////////\n"); print(mat_c,n,m); return 0; }
588792af45340e00f9c621fbfdf392431a8b5983.cu
#include <stdlib.h> #include <stdio.h> #define TAM 3 #define N 2 #define T 6 #define TPB 16 __global__ void sum_matrix(int** dd_mat_a,int** dd_mat_b,int** dd_mat_c, int n, int m){ int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; if( y<n && x<m ){ // revisar <<<<<<<<<<<<<<<<<<<<<<<<<<<< //*(*(dd_mat_a+y)+x)=-9; *(*(dd_mat_c+y)+x)= *(*(dd_mat_a+y)+x) + *(*(dd_mat_b+y)+x); } } void create3(int*** mat,int n, int m){ *mat = (int** )malloc(sizeof(int*)*n); (*mat)[0] = (int* )malloc(sizeof(int)*n*m); int i; for(i=1;i<n;i++){ (*mat)[i] = (*mat)[0]+i*m; } } void fill(int** mat, int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) mat[i][j] = rand()%2; } } void fill_value(int** mat,int n, int m, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<m;j++) mat[i][j] = value; } void print(int** mat,int n, int m){ int i,j; for(i=0; i<n ;i++){ for(j=0; j<m ;j++) printf("%d",mat[i][j]); printf("\n"); } } void create5(int**& mat, int**& d_mat, int**& dd_mat, int n, int m, int fillValue=-1){ int i; mat = (int** )malloc(sizeof(int*)*n); mat[0] = (int* )malloc(sizeof(int)*n*m); for(i=1;i<n;i++){ mat[i] = mat[i-1]+m; } if(fillValue==-1){ fill(mat,n,m); } else{ fill_value(mat,n,m,fillValue); } int size_row = sizeof(int*) * n; int size_col = sizeof(int ) * m; d_mat = (int**) malloc(size_row); cudaMalloc((void**)& d_mat[0], sizeof(int) * m * n ); cudaMemcpy(d_mat[0], mat[0], sizeof(int) * m * n ,cudaMemcpyHostToDevice); for(i=1;i<n;i++){ d_mat[i]=(d_mat[i-1]+m); } cudaMalloc((void***)&dd_mat,size_row); cudaMemcpy(dd_mat,d_mat,size_row,cudaMemcpyHostToDevice); } int main(){ if(N*T<TAM){ printf("no cubre la matriz\n"); return 0; } int n = TAM; int m = TAM; int** mat_a; int** d_mat_a; int** dd_mat_a; int** mat_b; int** d_mat_b; int** dd_mat_b; int** mat_c; int** d_mat_c; int** dd_mat_c; create5(mat_a,d_mat_a,dd_mat_a,n,m); create5(mat_b,d_mat_b,dd_mat_b,n,m); create5(mat_c,d_mat_c,dd_mat_c,n,m,0); int i; int size_row = sizeof(int*) * n; int 
size_col = sizeof(int ) * m; /* int** mat_a; create3(&mat_a,n,m); fill(mat_a,n,m); int **d_mat_a; int **dd_mat_a; d_mat_a = (int**) malloc(size_row); cudaMalloc((void**)& d_mat_a[0], sizeof(int) * m * n ); cudaMemcpy(d_mat_a[0], mat_a[0], sizeof(int) * m * n ,cudaMemcpyHostToDevice); for(i=1;i<n;i++){ d_mat_a[i]=(d_mat_a[i-1]+m); } cudaMalloc((void***)&dd_mat_a,size_row); cudaMemcpy(dd_mat_a,d_mat_a,size_row,cudaMemcpyHostToDevice); */ print(mat_a,n,m); printf("//////////////////\n"); print(mat_b,n,m); printf("//////////////////\n"); print(mat_c,n,m); printf("//////////////////\n"); printf("//////////////////\n"); dim3 blockNum(TPB,TPB,1); dim3 grid((blockNum.x-1+n)/blockNum.x,(blockNum.y-1+m)/blockNum.y,1); sum_matrix<<<grid,blockNum>>>(dd_mat_a,dd_mat_b,dd_mat_c,n,m); for(i=0;i<n;i++){ cudaMemcpy(mat_c[i],d_mat_c[i],size_col,cudaMemcpyDeviceToHost); } printf("//////////////////\n"); printf("//////////////////\n"); //print(mat_a,n,m); printf("//////////////////\n"); //print(mat_b,n,m); printf("//////////////////\n"); print(mat_c,n,m); return 0; }
fb4f17e5322b4321da2b812d371f05b0979f11ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define BLOCK_SIZE 512 __global__ void reduction(float *out, float *in, unsigned size) { /******************************************************************** Load a segment of the input vector into shared memory Traverse the reduction tree Write the computed sum to the output vector at the correct index ********************************************************************/ // INSERT KERNEL CODE HERE __shared__ float partialSum[2*BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; if(start + t < size) partialSum[t] = in[start + t]; else partialSum[t] = 0.0; if(start + blockDim.x + t < size) partialSum[blockDim.x + t] = in[start + blockDim.x + t]; else partialSum[blockDim.x + t] = 0.0; for(unsigned int stride=blockDim.x; stride>=1; stride/=2){ __syncthreads(); if(t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); if(t==0) out[blockIdx.x] = partialSum[0]; }
fb4f17e5322b4321da2b812d371f05b0979f11ba.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #define BLOCK_SIZE 512 __global__ void reduction(float *out, float *in, unsigned size) { /******************************************************************** Load a segment of the input vector into shared memory Traverse the reduction tree Write the computed sum to the output vector at the correct index ********************************************************************/ // INSERT KERNEL CODE HERE __shared__ float partialSum[2*BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; if(start + t < size) partialSum[t] = in[start + t]; else partialSum[t] = 0.0; if(start + blockDim.x + t < size) partialSum[blockDim.x + t] = in[start + blockDim.x + t]; else partialSum[blockDim.x + t] = 0.0; for(unsigned int stride=blockDim.x; stride>=1; stride/=2){ __syncthreads(); if(t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); if(t==0) out[blockIdx.x] = partialSum[0]; }
a5021c770c55cded928307497f8257e44ac5afc6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <chrono> #include <hip/hip_runtime.h> #include "kernels.h" int PowTwoDivider(int n) { if (n == 0) return 0; int divider = 1; while ((n & divider) == 0) divider <<= 1; return divider; } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <width> <height> <repeat>\n", argv[0]); return 1; } const int width = atoi(argv[1]); const int height = atoi(argv[2]); const int repeat = atoi(argv[3]); const int image_pitch = width * sizeof(float); const int numPix = width * height; const int image_size = numPix * sizeof(float); float *image = (float*) malloc (image_size); // image image with random values srand(123); for (int i = 0; i < numPix; i++) { uint x = rand() % 256; uint y = rand() % 256; uint z = rand() % 256; uint w = rand() % 256; *(uint*)(&image[i]) = (w << 24) | (z << 16) | (y << 8) | x; } float *d_image; hipMalloc((void**)&d_image, image_size); int blocks = ::min(PowTwoDivider(height), 64); dim3 dimBlockX (blocks); dim3 dimGridX ((height + blocks - 1) / blocks); blocks = ::min(PowTwoDivider(width), 64); dim3 dimBlockY (blocks); dim3 dimGridY ((width + blocks - 1) / blocks); long total_time = 0; for (int i = 0; i < repeat; i++) { hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(toCoef2DX, dimGridX, dimBlockX, 0, 0, d_image, image_pitch, width, height); hipLaunchKernelGGL(toCoef2DY, dimGridY, dimBlockY, 0, 0, d_image, image_pitch, width, height); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); total_time += time; } printf("Average kernel execution time %f (s)\n", total_time * 1e-9f / repeat); hipMemcpy(image, d_image, image_size, hipMemcpyDeviceToHost); hipFree(d_image); float sum = 0.f; for (int i = 0; i < numPix; i++) { const uchar *t = 
(const uchar*)(&image[i]); sum += (t[0] + t[1] + t[2] + t[3]) / 4; } printf("Checksum: %f\n", sum / numPix); free(image); return 0; }
a5021c770c55cded928307497f8257e44ac5afc6.cu
#include <stdio.h> #include <stdlib.h> #include <algorithm> #include <chrono> #include <hip/hip_runtime.h> #include "kernels.h" int PowTwoDivider(int n) { if (n == 0) return 0; int divider = 1; while ((n & divider) == 0) divider <<= 1; return divider; } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <width> <height> <repeat>\n", argv[0]); return 1; } const int width = atoi(argv[1]); const int height = atoi(argv[2]); const int repeat = atoi(argv[3]); const int image_pitch = width * sizeof(float); const int numPix = width * height; const int image_size = numPix * sizeof(float); float *image = (float*) malloc (image_size); // image image with random values srand(123); for (int i = 0; i < numPix; i++) { uint x = rand() % 256; uint y = rand() % 256; uint z = rand() % 256; uint w = rand() % 256; *(uint*)(&image[i]) = (w << 24) | (z << 16) | (y << 8) | x; } float *d_image; hipMalloc((void**)&d_image, image_size); int blocks = std::min(PowTwoDivider(height), 64); dim3 dimBlockX (blocks); dim3 dimGridX ((height + blocks - 1) / blocks); blocks = std::min(PowTwoDivider(width), 64); dim3 dimBlockY (blocks); dim3 dimGridY ((width + blocks - 1) / blocks); long total_time = 0; for (int i = 0; i < repeat; i++) { hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(toCoef2DX, dimGridX, dimBlockX, 0, 0, d_image, image_pitch, width, height); hipLaunchKernelGGL(toCoef2DY, dimGridY, dimBlockY, 0, 0, d_image, image_pitch, width, height); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); total_time += time; } printf("Average kernel execution time %f (s)\n", total_time * 1e-9f / repeat); hipMemcpy(image, d_image, image_size, hipMemcpyDeviceToHost); hipFree(d_image); float sum = 0.f; for (int i = 0; i < numPix; i++) { const uchar *t = (const uchar*)(&image[i]); sum += (t[0] + t[1] + t[2] + 
t[3]) / 4; } printf("Checksum: %f\n", sum / numPix); free(image); return 0; }
bc5c900eb026818d104c6f008ae837a3b81b31b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. 
*/ #include "gg.h" #include "ggcuda.h" #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "thread_work.h" mgpu::standard_context_t context; void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=False $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=1 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=False $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; AppendOnlyList el; #include "mst.h" #define INF UINT_MAX const int DEBUG = 0; static const int __tb_union_components = TB_SIZE; __global__ void init_wl(CSRGraph graph, Worklist2 in_wl, Worklist2 out_wl) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; if (tid == 0) in_wl.reset_next_slot(); index_type node_end; // FP: "1 -> 2; node_end = (graph).nnodes; for (index_type node = 0 + tid; node < node_end; node += nthreads) { (out_wl).push(node); } // FP: "4 -> 5; } __global__ void find_comp_min_elem(CSRGraph graph, struct comp_data comp, LockArrayTicket complocks, ComponentSpace cs, int level, AppendOnlyList bosses, Worklist2 in_wl, Worklist2 out_wl) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; if (tid == 0) in_wl.reset_next_slot(); index_type wlnode_end; // FP: "1 -> 2; // FP: "2 -> 3; wlnode_end = *((volatile index_type *) (in_wl).dindex); for (index_type wlnode = 0 + tid; wlnode < wlnode_end; wlnode += nthreads) { int node; bool pop; index_type edge_end; pop = (in_wl).pop_id(wlnode, node); unsigned minwt = INF; unsigned minedge = INF; int degree = graph.getOutDegree(node); int mindstcomp = 0; int srccomp = cs.find(node); bool isBoss = srccomp == node; 
edge_end = (graph).getFirstEdge((node) + 1); for (index_type edge = (graph).getFirstEdge(node) + 0; edge < edge_end; edge += 1) { int edgewt = graph.getAbsWeight(edge); if (edgewt < minwt) { int dstcomp = cs.find(graph.getAbsDestination(edge)); if (dstcomp != srccomp) { minwt = edgewt; minedge = edge; } } } if (isBoss && degree) { bosses.push(node); } if (minwt != INF) { (out_wl).push(node); { #if __CUDACC_VER_MAJOR__ >= 7 volatile bool done_ = false; #else bool done_ = false; #endif int _ticket = (complocks).reserve(srccomp); while (!done_) { if (complocks.acquire_or_fail(srccomp, _ticket)) { if (comp.minwt[srccomp] == 0 || (comp.lvl[srccomp] < level) || (comp.minwt[srccomp] > minwt)) { comp.minwt[srccomp] = minwt; comp.lvl[srccomp] = level; comp.minedge[srccomp] = minedge; } complocks.release(srccomp); done_ = true; } } } } else { if (isBoss && degree) { (out_wl).push(node); } } } // FP: "30 -> 31; } __global__ void union_components(CSRGraph graph, ComponentSpace cs, struct comp_data compdata, int level, AppendOnlyList el, AppendOnlyList ew, AppendOnlyList b_in, AppendOnlyList b_out, Worklist2 in_wl, Worklist2 out_wl, GlobalBarrier gb, HGAccumulator<int> ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; typedef hipcub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); if (tid == 0) in_wl.reset_next_slot(); index_type wlnode_end; index_type wlnode_rup; // FP: "1 -> 2; wlnode_end = *((volatile index_type *) (b_in).dindex); wlnode_rup = ((0) + roundup(((*((volatile index_type *) (b_in).dindex)) - (0)), (nthreads))); for (index_type wlnode = 0 + tid; wlnode < wlnode_rup; wlnode += nthreads) { int node; bool pop; pop = (b_in).pop_id(wlnode, node); int r = 0; int dstcomp = -1; int srccomp = -1; if (pop && compdata.lvl[node] == level) { srccomp = cs.find(node); dstcomp = cs.find(graph.getAbsDestination(compdata.minedge[node])); } gb.Sync(); if (srccomp != dstcomp) { if 
(!cs.unify(srccomp, dstcomp)) { b_out.push(node); r = 1; } else { el.push(compdata.minedge[node]); ew.push(compdata.minwt[node]); } } gb.Sync(); if (r) { ret_val.reduce(true); continue; } } ret_val.thread_exit<_br>(_ts); } void gg_main(CSRGraph& hg, CSRGraph& gg) { dim3 blocks, threads; kernel_sizing(gg, blocks, threads); static GlobalBarrierLifetime union_components_barrier; static bool union_components_barrier_inited; struct comp_data comp; PipeContextT<Worklist2> pipe; // FP: "1 -> 2; ComponentSpace cs (hg.nnodes); // FP: "2 -> 3; el = AppendOnlyList(hg.nedges); // FP: "3 -> 4; AppendOnlyList ew (hg.nedges); // FP: "4 -> 5; AppendOnlyList bosses[2] = {AppendOnlyList(hg.nnodes), AppendOnlyList(hg.nnodes)}; int cur_boss = 0; // FP: "5 -> 6; static const size_t union_components_residency = maximum_residency(union_components, __tb_union_components, 0); static const size_t union_components_blocks = GG_MIN(blocks.x, ggc_get_nSM() * union_components_residency); if(!union_components_barrier_inited) { union_components_barrier.Setup(union_components_blocks); union_components_barrier_inited = true;}; // FP: "6 -> 7; // FP: "7 -> 8; comp.weight.alloc(hg.nnodes); comp.edge.alloc(hg.nnodes); comp.node.alloc(hg.nnodes); comp.level.alloc(hg.nnodes); comp.dstcomp.alloc(hg.nnodes); comp.lvl = comp.level.zero_gpu(); comp.minwt = comp.weight.zero_gpu(); comp.minedge = comp.edge.gpu_wr_ptr(); comp.minnode = comp.node.gpu_wr_ptr(); comp.mindstcomp = comp.dstcomp.gpu_wr_ptr(); // FP: "8 -> 9; LockArrayTicket complocks (hg.nnodes); // FP: "9 -> 10; int level = 1; int mw = 0; int last_mw = 0; // FP: "10 -> 11; pipe = PipeContextT<Worklist2>(hg.nnodes); { { pipe.out_wl().will_write(); hipLaunchKernelGGL(( init_wl) , dim3(blocks), dim3(threads), 0, 0, gg, pipe.in_wl(), pipe.out_wl()); pipe.in_wl().swap_slots(); pipe.advance2(); // FP: "12 -> 13; while (pipe.in_wl().nitems()) { bool loopc = false; last_mw = mw; pipe.out_wl().will_write(); hipLaunchKernelGGL(( find_comp_min_elem) , 
dim3(blocks), dim3(threads), 0, 0, gg, comp, complocks, cs, level, bosses[cur_boss], pipe.in_wl(), pipe.out_wl()); pipe.in_wl().swap_slots(); pipe.advance2(); do { Shared<int> retval = Shared<int>(1); HGAccumulator<int> _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); pipe.out_wl().will_write(); hipLaunchKernelGGL(( union_components) , dim3(union_components_blocks), dim3(__tb_union_components), 0, 0, gg, cs, comp, level, el, ew, bosses[cur_boss], bosses[cur_boss ^ 1], pipe.in_wl(), pipe.out_wl(), union_components_barrier, _rv); loopc = *(retval.cpu_rd_ptr()) > 0; cur_boss ^= 1; bosses[cur_boss].reset(); } while (loopc); mw = el.nitems(); level++; if (last_mw == mw) { break; } } // FP: "23 -> 24; } } pipe.free(); // FP: "11 -> 12; unsigned long int rweight = 0; size_t nmstedges ; // FP: "24 -> 25; nmstedges = ew.nitems(); mgpu::reduce(ew.list.gpu_rd_ptr(), nmstedges, &rweight, mgpu::plus_t<long unsigned int>(), context); // FP: "25 -> 26; printf("final mstwt: %llu\n", rweight); printf("total edges: %llu, total components: %llu\n", nmstedges, cs.numberOfComponentsHost()); // FP: "26 -> 27; }
bc5c900eb026818d104c6f008ae837a3b81b31b4.cu
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. 
*/ #include "gg.h" #include "ggcuda.h" #include "cub/cub.cuh" #include "cub/util_allocator.cuh" #include "thread_work.h" mgpu::standard_context_t context; void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=False $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=1 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=False $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; AppendOnlyList el; #include "mst.h" #define INF UINT_MAX const int DEBUG = 0; static const int __tb_union_components = TB_SIZE; __global__ void init_wl(CSRGraph graph, Worklist2 in_wl, Worklist2 out_wl) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; if (tid == 0) in_wl.reset_next_slot(); index_type node_end; // FP: "1 -> 2; node_end = (graph).nnodes; for (index_type node = 0 + tid; node < node_end; node += nthreads) { (out_wl).push(node); } // FP: "4 -> 5; } __global__ void find_comp_min_elem(CSRGraph graph, struct comp_data comp, LockArrayTicket complocks, ComponentSpace cs, int level, AppendOnlyList bosses, Worklist2 in_wl, Worklist2 out_wl) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; if (tid == 0) in_wl.reset_next_slot(); index_type wlnode_end; // FP: "1 -> 2; // FP: "2 -> 3; wlnode_end = *((volatile index_type *) (in_wl).dindex); for (index_type wlnode = 0 + tid; wlnode < wlnode_end; wlnode += nthreads) { int node; bool pop; index_type edge_end; pop = (in_wl).pop_id(wlnode, node); unsigned minwt = INF; unsigned minedge = INF; int degree = graph.getOutDegree(node); int mindstcomp = 0; int srccomp = cs.find(node); bool isBoss = srccomp == node; 
edge_end = (graph).getFirstEdge((node) + 1); for (index_type edge = (graph).getFirstEdge(node) + 0; edge < edge_end; edge += 1) { int edgewt = graph.getAbsWeight(edge); if (edgewt < minwt) { int dstcomp = cs.find(graph.getAbsDestination(edge)); if (dstcomp != srccomp) { minwt = edgewt; minedge = edge; } } } if (isBoss && degree) { bosses.push(node); } if (minwt != INF) { (out_wl).push(node); { #if __CUDACC_VER_MAJOR__ >= 7 volatile bool done_ = false; #else bool done_ = false; #endif int _ticket = (complocks).reserve(srccomp); while (!done_) { if (complocks.acquire_or_fail(srccomp, _ticket)) { if (comp.minwt[srccomp] == 0 || (comp.lvl[srccomp] < level) || (comp.minwt[srccomp] > minwt)) { comp.minwt[srccomp] = minwt; comp.lvl[srccomp] = level; comp.minedge[srccomp] = minedge; } complocks.release(srccomp); done_ = true; } } } } else { if (isBoss && degree) { (out_wl).push(node); } } } // FP: "30 -> 31; } __global__ void union_components(CSRGraph graph, ComponentSpace cs, struct comp_data compdata, int level, AppendOnlyList el, AppendOnlyList ew, AppendOnlyList b_in, AppendOnlyList b_out, Worklist2 in_wl, Worklist2 out_wl, GlobalBarrier gb, HGAccumulator<int> ret_val) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; typedef cub::BlockReduce<int, TB_SIZE> _br; __shared__ _br::TempStorage _ts; ret_val.thread_entry(); if (tid == 0) in_wl.reset_next_slot(); index_type wlnode_end; index_type wlnode_rup; // FP: "1 -> 2; wlnode_end = *((volatile index_type *) (b_in).dindex); wlnode_rup = ((0) + roundup(((*((volatile index_type *) (b_in).dindex)) - (0)), (nthreads))); for (index_type wlnode = 0 + tid; wlnode < wlnode_rup; wlnode += nthreads) { int node; bool pop; pop = (b_in).pop_id(wlnode, node); int r = 0; int dstcomp = -1; int srccomp = -1; if (pop && compdata.lvl[node] == level) { srccomp = cs.find(node); dstcomp = cs.find(graph.getAbsDestination(compdata.minedge[node])); } gb.Sync(); if (srccomp != dstcomp) { if 
(!cs.unify(srccomp, dstcomp)) { b_out.push(node); r = 1; } else { el.push(compdata.minedge[node]); ew.push(compdata.minwt[node]); } } gb.Sync(); if (r) { ret_val.reduce(true); continue; } } ret_val.thread_exit<_br>(_ts); } void gg_main(CSRGraph& hg, CSRGraph& gg) { dim3 blocks, threads; kernel_sizing(gg, blocks, threads); static GlobalBarrierLifetime union_components_barrier; static bool union_components_barrier_inited; struct comp_data comp; PipeContextT<Worklist2> pipe; // FP: "1 -> 2; ComponentSpace cs (hg.nnodes); // FP: "2 -> 3; el = AppendOnlyList(hg.nedges); // FP: "3 -> 4; AppendOnlyList ew (hg.nedges); // FP: "4 -> 5; AppendOnlyList bosses[2] = {AppendOnlyList(hg.nnodes), AppendOnlyList(hg.nnodes)}; int cur_boss = 0; // FP: "5 -> 6; static const size_t union_components_residency = maximum_residency(union_components, __tb_union_components, 0); static const size_t union_components_blocks = GG_MIN(blocks.x, ggc_get_nSM() * union_components_residency); if(!union_components_barrier_inited) { union_components_barrier.Setup(union_components_blocks); union_components_barrier_inited = true;}; // FP: "6 -> 7; // FP: "7 -> 8; comp.weight.alloc(hg.nnodes); comp.edge.alloc(hg.nnodes); comp.node.alloc(hg.nnodes); comp.level.alloc(hg.nnodes); comp.dstcomp.alloc(hg.nnodes); comp.lvl = comp.level.zero_gpu(); comp.minwt = comp.weight.zero_gpu(); comp.minedge = comp.edge.gpu_wr_ptr(); comp.minnode = comp.node.gpu_wr_ptr(); comp.mindstcomp = comp.dstcomp.gpu_wr_ptr(); // FP: "8 -> 9; LockArrayTicket complocks (hg.nnodes); // FP: "9 -> 10; int level = 1; int mw = 0; int last_mw = 0; // FP: "10 -> 11; pipe = PipeContextT<Worklist2>(hg.nnodes); { { pipe.out_wl().will_write(); init_wl <<<blocks, threads>>>(gg, pipe.in_wl(), pipe.out_wl()); pipe.in_wl().swap_slots(); pipe.advance2(); // FP: "12 -> 13; while (pipe.in_wl().nitems()) { bool loopc = false; last_mw = mw; pipe.out_wl().will_write(); find_comp_min_elem <<<blocks, threads>>>(gg, comp, complocks, cs, level, 
bosses[cur_boss], pipe.in_wl(), pipe.out_wl()); pipe.in_wl().swap_slots(); pipe.advance2(); do { Shared<int> retval = Shared<int>(1); HGAccumulator<int> _rv; *(retval.cpu_wr_ptr()) = 0; _rv.rv = retval.gpu_wr_ptr(); pipe.out_wl().will_write(); union_components <<<union_components_blocks, __tb_union_components>>>(gg, cs, comp, level, el, ew, bosses[cur_boss], bosses[cur_boss ^ 1], pipe.in_wl(), pipe.out_wl(), union_components_barrier, _rv); loopc = *(retval.cpu_rd_ptr()) > 0; cur_boss ^= 1; bosses[cur_boss].reset(); } while (loopc); mw = el.nitems(); level++; if (last_mw == mw) { break; } } // FP: "23 -> 24; } } pipe.free(); // FP: "11 -> 12; unsigned long int rweight = 0; size_t nmstedges ; // FP: "24 -> 25; nmstedges = ew.nitems(); mgpu::reduce(ew.list.gpu_rd_ptr(), nmstedges, &rweight, mgpu::plus_t<long unsigned int>(), context); // FP: "25 -> 26; printf("final mstwt: %llu\n", rweight); printf("total edges: %llu, total components: %llu\n", nmstedges, cs.numberOfComponentsHost()); // FP: "26 -> 27; }
910bf3dab2d6a830d10839d2364a1680c395fc5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ hipComplex tans(hipComplex m) { 
return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn 
= enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex 
Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, 
hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the 
hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } 
__device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex 
spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex vue = cue; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<3;v++) { cue =cue- powc(uon,hilva(cue))-powc(hilva(cue),aon); accume = accume + powc(uon,hilva(cue)); } cue = the3(accume,flat(moeb(uon,faxon,cue))); for(v=0;v<3;v++) { cue =cue- powc(uon,hilva(cue))-powc(hilva(cue),aon); accume = accume + powc(uon,hilva(cue)); } cue = the3(accume,flat(moeb(aon,fixon,cue))); double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux 
)*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
910bf3dab2d6a830d10839d2364a1680c395fc5d.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex 
moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; 
cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex 
thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex 
frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); 
for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(ahilv(q*z),ahilv(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); 
cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex vue = cue; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<3;v++) { cue =cue- powc(uon,hilva(cue))-powc(hilva(cue),aon); accume = accume + powc(uon,hilva(cue)); } cue = the3(accume,flat(moeb(uon,faxon,cue))); for(v=0;v<3;v++) { cue =cue- powc(uon,hilva(cue))-powc(hilva(cue),aon); accume = accume + powc(uon,hilva(cue)); } cue = the3(accume,flat(moeb(aon,fixon,cue))); double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = 
ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
cd96a9419e9c399b75e30445dfbc86b9b0578be6.hip
// !!! This is a file automatically generated by hipify!!! // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // ============================================================================= #include <hip/hip_runtime.h> #include "pointcloud.cuh" namespace chrono { namespace sensor { // Converts 32bpp ARGB imgIn pixels to 8bpp Grayscale imgOut pixels __global__ void pointcloud_from_depth_kernel(float* imgIn, float* imgOut, int numPixels, LidarParams params) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < numPixels) { int hIndex = index % params.horizontal_samples; int vIndex = index / params.horizontal_samples; float vAngle = (vIndex / (float)(params.vertical_samples)) * params.vFOV - params.vFOV / 2.; float hAngle = (hIndex / (float)(params.horizontal_samples)) * params.hFOV - params.hFOV / 2.; float range = imgIn[2 * index]; float proj_xy = range * cos(vAngle); float x = proj_xy * cos(hAngle); float y = proj_xy * sin(hAngle); float z = range * sin(vAngle); imgOut[4 * index] = x; imgOut[4 * index + 1] = y; imgOut[4 * index + 2] = z; imgOut[4 * index + 3] = imgIn[2 * index + 1]; } } void cuda_pointcloud_from_depth(void* bufDI, void* bufOut, int width, int height, LidarParams params) { int numPixels = width * height; const int nThreads = 512; int nBlocks = (numPixels + nThreads - 1) / nThreads; hipLaunchKernelGGL(( pointcloud_from_depth_kernel), dim3(nBlocks), dim3(nThreads), 0, 0, (float*)bufDI, (float*)bufOut, numPixels, 
params); } } // namespace sensor } // namespace chrono
cd96a9419e9c399b75e30445dfbc86b9b0578be6.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // ============================================================================= #include <cuda.h> #include "pointcloud.cuh" namespace chrono { namespace sensor { // Converts 32bpp ARGB imgIn pixels to 8bpp Grayscale imgOut pixels __global__ void pointcloud_from_depth_kernel(float* imgIn, float* imgOut, int numPixels, LidarParams params) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < numPixels) { int hIndex = index % params.horizontal_samples; int vIndex = index / params.horizontal_samples; float vAngle = (vIndex / (float)(params.vertical_samples)) * params.vFOV - params.vFOV / 2.; float hAngle = (hIndex / (float)(params.horizontal_samples)) * params.hFOV - params.hFOV / 2.; float range = imgIn[2 * index]; float proj_xy = range * cos(vAngle); float x = proj_xy * cos(hAngle); float y = proj_xy * sin(hAngle); float z = range * sin(vAngle); imgOut[4 * index] = x; imgOut[4 * index + 1] = y; imgOut[4 * index + 2] = z; imgOut[4 * index + 3] = imgIn[2 * index + 1]; } } void cuda_pointcloud_from_depth(void* bufDI, void* bufOut, int width, int height, LidarParams params) { int numPixels = width * height; const int nThreads = 512; int nBlocks = (numPixels + nThreads - 1) / nThreads; pointcloud_from_depth_kernel<<<nBlocks, nThreads>>>((float*)bufDI, (float*)bufOut, numPixels, params); } } // namespace sensor } // namespace chrono
4b0ee3cce5f7d44d9897ec0f9fd99858c3198afa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2015 Davis E. King (davis@dlib.net) // License: Boost Software License See LICENSE.txt for the full license. #include "cuda_utils.h" #include "cuda_dlib.h" namespace dlib { namespace cuda { // ----------------------------------------------------------------------------------- void set_device ( int dev ) { CHECK_CUDA(hipSetDevice(dev)); } int get_device ( ) { int dev = 0; CHECK_CUDA(hipGetDevice(&dev)); return dev; } int get_num_devices ( ) { int num_devices; CHECK_CUDA(hipGetDeviceCount(&num_devices)); return num_devices; } // ----------------------------------------------------------------------------------- __global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_multiply2(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { d[i] = 0; for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i%s1_n]*s2[i%s2_n]; } } void multiply ( tensor& dest, const tensor& src1, const tensor& src2 ) { DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() && dest.nr() == src1.nr() && src1.nr() == src2.nr() && dest.nc() == src1.nc() && src1.nc() == src2.nc() ,""); const long MD = ::max(::max(dest.num_samples(),src1.num_samples()),src2.num_samples()); DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) && (src1.num_samples()==1 || src1.num_samples()==MD) && (src2.num_samples()==1 || src2.num_samples()==MD) ,""); if (dest.size() == 0) return; const size_t max_size = ::max(::max(dest.size(),src1.size()),src2.size()); const auto d = dest.host(); const auto s1 = src1.host(); const auto 
s2 = src2.host(); if (dest.size() == src1.size() && src1.size() == src2.size()) { launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); } else if (dest.num_samples() == 1) { launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); } else { launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // zero initialize d before we begin. for (auto i : grid_stride_range(0, ks)) d[i] = 0; __syncthreads(); // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } void multiply_conv ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (have_same_dimensions(dest,src1)) { DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),""); if (dest.size() == 0) return; launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); } else { DLIB_CASSERT(have_same_dimensions(src1,src2),""); DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),""); if (dest.size() == 0) return; dim3 blocks(10,1); dim3 threads(32,32); // x size must be 32 because we are 
using warp_reduce_atomic_add() in the kernel. hipLaunchKernelGGL(( _cuda_multiply_conv2), dim3(blocks),dim3(threads), 0, 0, dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]+s2[i]; } } __global__ void _cuda_add2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1+v2; } } void add ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { // Otherwise, do the more complex version with bounds checking. 
launch_kernel(_cuda_add2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i] + B; } } __global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i]; } } void affine_transform( tensor& dest, const tensor& src, const float A, const float B ) { DLIB_CASSERT(dest.size()==src.size(),""); if (B != 0) launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B); else launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C; } } __global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i]; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size(),""); DLIB_CASSERT(dest.size()==src2.size(),""); if (C != 0) launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C); else launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } // 
---------------------------------------------------------------------------------------- __global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale) { for (auto i : grid_stride_range(0, n)) { d[i] += scale*s[i]; } } void add_scaled( tensor& dest, const float scale, const tensor& src ) { DLIB_CASSERT(dest.size()==src.size(),""); launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform5( float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D ) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C, const float D ) { DLIB_CASSERT(dest.size()==src1.size(),""); DLIB_CASSERT(dest.size()==src2.size(),""); DLIB_CASSERT(dest.size()==src3.size(),""); launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src3.device(), dest.size(), A, B, C, D); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i]*s[i] + B[i]; } } __global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i%bs]*s[i] + B[i%bs]; } } void affine_transform( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src),""); DLIB_CASSERT( ((A.num_samples()==1 && B.num_samples()==1) || (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())) && A.nr()==B.nr() && 
B.nr()==src.nr() && A.nc()==B.nc() && B.nc()==src.nc() && A.k() ==B.k() && B.k()==src.k(),""); if (A.num_samples() == 1) { launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size()); } else { launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_adam_update( size_t n, float* s, float* m, float* v, const float alpha, const float weight_decay, const float momentum1, const float momentum2, const float* params, const float* params_grad ) { const float eps = 1e-8; // The loop is equivalent to doing this: // m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad); // v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad); // s = -alpha*m/(sqrt(v) + eps); for (auto i : grid_stride_range(0, n)) { float g = (weight_decay*params[i] + params_grad[i]); m[i] = momentum1*m[i] + (1-momentum1)*g; v[i] = momentum2*v[i] + (1-momentum2)*g*g; s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps); } } void compute_adam_update ( tensor& s, tensor& m, tensor& v, const float t, const float learning_rate, const float weight_decay, const float momentum1, const float momentum2, const tensor& params, const tensor& params_grad ) { DLIB_CASSERT(s.size() == m.size() && s.size() == v.size() && s.size() == params.size() && s.size() == params_grad.size(),""); const float alpha = learning_rate*std::sqrt(1-::pow(momentum2,t))/(1-::pow(momentum1, t)); launch_kernel(_cuda_compute_adam_update,max_jobs(s.size()), s.size(), s.device(), m.device(), v.device(), alpha, weight_decay, momentum1, momentum2, params.device(), params_grad.device()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, const 
float* A, const float* B, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = A[k]*s[i] + B[k]; } } void affine_transform_conv( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src),""); DLIB_CASSERT(have_same_dimensions(A, B),""); DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),""); launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()), dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k()); } // ----------------------------------------------------------------------------------- __global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n) { for (auto i : grid_stride_range(0, n)) { out[i] = in[i]; for (size_t j = i+n; j < total_n; j+=n) out[i] += in[j]; } } void assign_bias_gradient ( tensor& grad, const tensor& gradient_input ) { DLIB_CASSERT( grad.num_samples() == 1 && gradient_input.k() == grad.k() && gradient_input.nr() == grad.nr() && gradient_input.nc() == grad.nc() && gradient_input.size() > 0,""); launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size()); } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_threshold(float* d, size_t n, float thresh) { for (auto i : grid_stride_range(0, n)) { d[i] = d[i]>thresh ? 1:0; } } void threshold ( tensor& data, float thresh ) { launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh); } // ------------------------------------------------------------------------------------ __global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result) { // Parallel sum everything into local temp variables. 
float temp = 0; for(auto i : grid_stride_range(0, n)) temp += a[i]*b[i]; // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*result, temp); } void dot ( const tensor& a, const tensor& b, tensor& result, size_t idx ) { DLIB_CASSERT(a.size() == b.size(), ""); DLIB_CASSERT(idx < result.size(), ""); launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp) { const float p = *pp; for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = p*s[i]; } } void prelu ( tensor& dest, const tensor& src, const tensor& param ) { launch_kernel(_cuda_prelu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), param.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad) { const float p = *pp; float pgrad = 0; for(auto i : grid_stride_range(0, n)) { if (s[i] > 0) { out[i] += gi[i]; } else { out[i] += p*gi[i]; pgrad += gi[i]*s[i]; } } // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*ppgrad, pgrad); } void prelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const tensor& param, tensor& params_grad ) { params_grad = 0; launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()), grad.device(), src.device(), gradient_input.device(), grad.size(), param.device(), params_grad.device()); } // ---------------------------------------------------------------------------------------- } }
4b0ee3cce5f7d44d9897ec0f9fd99858c3198afa.cu
// Copyright (C) 2015 Davis E. King (davis@dlib.net) // License: Boost Software License See LICENSE.txt for the full license. #include "cuda_utils.h" #include "cuda_dlib.h" namespace dlib { namespace cuda { // ----------------------------------------------------------------------------------- void set_device ( int dev ) { CHECK_CUDA(cudaSetDevice(dev)); } int get_device ( ) { int dev = 0; CHECK_CUDA(cudaGetDevice(&dev)); return dev; } int get_num_devices ( ) { int num_devices; CHECK_CUDA(cudaGetDeviceCount(&num_devices)); return num_devices; } // ----------------------------------------------------------------------------------- __global__ void _cuda_multiply1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]*s2[i]; } } __global__ void _cuda_multiply2(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n, size_t max_size) { for (auto i : grid_stride_range(0, n)) { d[i] = 0; for (size_t j = i; j < max_size; j += n) d[i] += s1[j%s1_n]*s2[j%s2_n]; } } __global__ void _cuda_multiply3(float* d, const float* s1, const float* s2, size_t n, size_t s1_n, size_t s2_n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i%s1_n]*s2[i%s2_n]; } } void multiply ( tensor& dest, const tensor& src1, const tensor& src2 ) { DLIB_CASSERT(dest.k() == src1.k() && src1.k() == src2.k() && dest.nr() == src1.nr() && src1.nr() == src2.nr() && dest.nc() == src1.nc() && src1.nc() == src2.nc() ,""); const long MD = std::max(std::max(dest.num_samples(),src1.num_samples()),src2.num_samples()); DLIB_CASSERT((dest.num_samples()==1 || dest.num_samples()==MD) && (src1.num_samples()==1 || src1.num_samples()==MD) && (src2.num_samples()==1 || src2.num_samples()==MD) ,""); if (dest.size() == 0) return; const size_t max_size = std::max(std::max(dest.size(),src1.size()),src2.size()); const auto d = dest.host(); const auto s1 = src1.host(); const auto s2 = src2.host(); if (dest.size() == src1.size() && src1.size() == 
src2.size()) { launch_kernel(_cuda_multiply1,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src1.size()); } else if (dest.num_samples() == 1) { launch_kernel(_cuda_multiply2,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size(), max_size); } else { launch_kernel(_cuda_multiply3,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), src1.size(), src2.size()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_multiply_conv(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = s1[i]*s2[k]; } } __global__ void _cuda_multiply_conv2(float* d, const float* s1, size_t n, const float* s2, size_t bs, size_t ks) { // zero initialize d before we begin. for (auto i : grid_stride_range(0, ks)) d[i] = 0; __syncthreads(); // loop over all the image planes for (auto i : grid_stride_range_y(0, n)) { // sum all the elements in the i-th image plane float temp = 0; for (auto j : grid_stride_range(i*bs, (i+1)*bs)) temp += s1[j]*s2[j]; auto k = i%ks; // and store the sum into d[k] warp_reduce_atomic_add(d[k], temp); } } void multiply_conv ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (have_same_dimensions(dest,src1)) { DLIB_CASSERT(src2.num_samples() == 1 && src2.nr() == 1 && src2.nc() == 1 && src2.k() == src1.k(),""); if (dest.size() == 0) return; launch_kernel(_cuda_multiply_conv,max_jobs(dest.size()), dest.device(), src1.device(), src1.size(), src2.device(), src1.nr()*src1.nc(), src1.k()); } else { DLIB_CASSERT(have_same_dimensions(src1,src2),""); DLIB_CASSERT(dest.num_samples() == 1 && dest.nr() == 1 && dest.nc() == 1 && dest.k() == src1.k(),""); if (dest.size() == 0) return; dim3 blocks(10,1); dim3 threads(32,32); // x size must be 32 because we are using warp_reduce_atomic_add() in the kernel. 
_cuda_multiply_conv2<<<blocks,threads>>>( dest.device(), src1.device(), src1.num_samples()*src1.k(), src2.device(), src1.nr()*src1.nc(), src1.k()); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_add1(float* d, const float* s1, const float* s2, size_t n) { for (auto i : grid_stride_range(0, n)) { d[i] = s1[i]+s2[i]; } } __global__ void _cuda_add2(float* d, const float* s1, const float* s2, size_t dn, size_t dk, size_t dr, size_t dc, size_t s1n, size_t s1k, size_t s1r, size_t s1c, size_t s2n, size_t s2k, size_t s2r, size_t s2c) { for (auto i : grid_stride_range(0, dn*dk*dr*dc)) { size_t n,k,r,c; unpack_idx(i, dk,dr,dc, n,k,r,c); float v1 = 0; float v2 = 0; if (n < s1n && k < s1k && r < s1r && c < s1c ) { v1 = s1[pack_idx(s1k,s1r,s1c, n,k,r,c)]; } if (n < s2n && k < s2k && r < s2r && c < s2c ) { v2 = s2[pack_idx(s2k,s2r,s2c, n,k,r,c)]; } d[i] = v1+v2; } } void add ( tensor& dest, const tensor& src1, const tensor& src2 ) { if (dest.size() == 0) return; // Do the simple and fast version if everything has the same dimensions if (have_same_dimensions(dest, src1) && have_same_dimensions(dest, src2)) { launch_kernel(_cuda_add1,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.size()); } else { // Otherwise, do the more complex version with bounds checking. 
launch_kernel(_cuda_add2,max_jobs(dest.size()), dest.device(), src1.device(), src2.device(), dest.num_samples(), dest.k(), dest.nr(), dest.nc(), src1.num_samples(), src1.k(), src1.nr(), src1.nc(), src2.num_samples(), src2.k(), src2.nr(), src2.nc() ); } } // ------------------------------------------------------------------------------------ __global__ void _cuda_affine_transform1(float* d, const float* s, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i] + B; } } __global__ void _cuda_affine_transform1_0(float* d, const float* s, size_t n, float A) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s[i]; } } void affine_transform( tensor& dest, const tensor& src, const float A, const float B ) { DLIB_CASSERT(dest.size()==src.size(),""); if (B != 0) launch_kernel(_cuda_affine_transform1,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A, B); else launch_kernel(_cuda_affine_transform1_0,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform4(float* d, const float* s1, const float* s2, size_t n, float A, float B, float C) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C; } } __global__ void _cuda_affine_transform4_0(float* d, const float* s1, const float* s2, size_t n, float A, float B) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i]; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const float A, const float B, const float C ) { DLIB_CASSERT(dest.size()==src1.size(),""); DLIB_CASSERT(dest.size()==src2.size(),""); if (C != 0) launch_kernel(_cuda_affine_transform4,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B, C); else launch_kernel(_cuda_affine_transform4_0,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), dest.size(), A, B); } // 
---------------------------------------------------------------------------------------- __global__ void _cuda_add_scaled(float* d, const float* s, size_t n, float scale) { for (auto i : grid_stride_range(0, n)) { d[i] += scale*s[i]; } } void add_scaled( tensor& dest, const float scale, const tensor& src ) { DLIB_CASSERT(dest.size()==src.size(),""); launch_kernel(_cuda_add_scaled,max_jobs(dest.size()),dest.device(), src.device(), dest.size(), scale); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_affine_transform5( float* d, const float* s1, const float* s2, const float* s3, size_t n, float A, float B, float C, float D ) { for (auto i : grid_stride_range(0, n)) { d[i] = A*s1[i] + B*s2[i] + C*s3[i] + D; } } void affine_transform( tensor& dest, const tensor& src1, const tensor& src2, const tensor& src3, const float A, const float B, const float C, const float D ) { DLIB_CASSERT(dest.size()==src1.size(),""); DLIB_CASSERT(dest.size()==src2.size(),""); DLIB_CASSERT(dest.size()==src3.size(),""); launch_kernel(_cuda_affine_transform5,max_jobs(dest.size()),dest.device(), src1.device(), src2.device(), src3.device(), dest.size(), A, B, C, D); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform2(float* d, const float* s, size_t n, const float* A, const float* B) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i]*s[i] + B[i]; } } __global__ void _cuda_affine_transform3(float* d, const float* s, size_t n, const float* A, const float* B, size_t bs) { for (auto i : grid_stride_range(0, n)) { d[i] = A[i%bs]*s[i] + B[i%bs]; } } void affine_transform( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src),""); DLIB_CASSERT( ((A.num_samples()==1 && B.num_samples()==1) || (A.num_samples()==src.num_samples() && B.num_samples()==src.num_samples())) && A.nr()==B.nr() && 
B.nr()==src.nr() && A.nc()==B.nc() && B.nc()==src.nc() && A.k() ==B.k() && B.k()==src.k(),""); if (A.num_samples() == 1) { launch_kernel(_cuda_affine_transform3,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device(), A.size()); } else { launch_kernel(_cuda_affine_transform2,max_jobs(dest.size()),dest.device(), src.device(), src.size(), A.device(), B.device()); } } // ---------------------------------------------------------------------------------------- __global__ void _cuda_compute_adam_update( size_t n, float* s, float* m, float* v, const float alpha, const float weight_decay, const float momentum1, const float momentum2, const float* params, const float* params_grad ) { const float eps = 1e-8; // The loop is equivalent to doing this: // m = momentum1*m + (1-momentum1) * (weight_decay*params + params_grad); // v = momentum2*v + (1-momentum2)*squared(weight_decay*params + params_grad); // s = -alpha*m/(sqrt(v) + eps); for (auto i : grid_stride_range(0, n)) { float g = (weight_decay*params[i] + params_grad[i]); m[i] = momentum1*m[i] + (1-momentum1)*g; v[i] = momentum2*v[i] + (1-momentum2)*g*g; s[i] = -alpha*m[i]/(std::sqrt(v[i]) + eps); } } void compute_adam_update ( tensor& s, tensor& m, tensor& v, const float t, const float learning_rate, const float weight_decay, const float momentum1, const float momentum2, const tensor& params, const tensor& params_grad ) { DLIB_CASSERT(s.size() == m.size() && s.size() == v.size() && s.size() == params.size() && s.size() == params_grad.size(),""); const float alpha = learning_rate*std::sqrt(1-std::pow(momentum2,t))/(1-std::pow(momentum1, t)); launch_kernel(_cuda_compute_adam_update,max_jobs(s.size()), s.size(), s.device(), m.device(), v.device(), alpha, weight_decay, momentum1, momentum2, params.device(), params_grad.device()); } // ----------------------------------------------------------------------------------- __global__ void _cuda_affine_transform_conv(float* d, const float* s, size_t n, 
const float* A, const float* B, size_t bs, size_t ks) { for (auto i : grid_stride_range(0, n)) { auto k = (i/bs)%ks; d[i] = A[k]*s[i] + B[k]; } } void affine_transform_conv( tensor& dest, const tensor& src, const tensor& A, const tensor& B ) { DLIB_CASSERT(have_same_dimensions(dest, src),""); DLIB_CASSERT(have_same_dimensions(A, B),""); DLIB_CASSERT(A.num_samples() == 1 && A.nr() == 1 && A.nc() == 1 && A.k() == src.k(),""); launch_kernel(_cuda_affine_transform_conv,max_jobs(dest.size()), dest.device(), src.device(), src.size(), A.device(), B.device(), src.nr()*src.nc(), src.k()); } // ----------------------------------------------------------------------------------- __global__ void _add_bias_gradient(float* out, const float* in, size_t n, size_t total_n) { for (auto i : grid_stride_range(0, n)) { out[i] = in[i]; for (size_t j = i+n; j < total_n; j+=n) out[i] += in[j]; } } void assign_bias_gradient ( tensor& grad, const tensor& gradient_input ) { DLIB_CASSERT( grad.num_samples() == 1 && gradient_input.k() == grad.k() && gradient_input.nr() == grad.nr() && gradient_input.nc() == grad.nc() && gradient_input.size() > 0,""); launch_kernel(_add_bias_gradient,max_jobs(grad.size()),grad.device(), gradient_input.device(), grad.size(), gradient_input.size()); } // ----------------------------------------------------------------------------------- // ----------------------------------------------------------------------------------- __global__ void _cuda_threshold(float* d, size_t n, float thresh) { for (auto i : grid_stride_range(0, n)) { d[i] = d[i]>thresh ? 1:0; } } void threshold ( tensor& data, float thresh ) { launch_kernel(_cuda_threshold,max_jobs(data.size()),data.device(), data.size(), thresh); } // ------------------------------------------------------------------------------------ __global__ void _cuda_dot(const float* a, const float* b, size_t n, float* result) { // Parallel sum everything into local temp variables. 
float temp = 0; for(auto i : grid_stride_range(0, n)) temp += a[i]*b[i]; // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*result, temp); } void dot ( const tensor& a, const tensor& b, tensor& result, size_t idx ) { DLIB_CASSERT(a.size() == b.size(), ""); DLIB_CASSERT(idx < result.size(), ""); launch_kernel(_cuda_dot, max_jobs(a.size()), a.device(), b.device(), a.size(), result.device()+idx); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu(const float* s, float* d, size_t n, const float* pp) { const float p = *pp; for (auto i : grid_stride_range(0, n)) { if (s[i] > 0) d[i] = s[i]; else d[i] = p*s[i]; } } void prelu ( tensor& dest, const tensor& src, const tensor& param ) { launch_kernel(_cuda_prelu, max_jobs(dest.size()), src.device(), dest.device(), src.size(), param.device()); } // ---------------------------------------------------------------------------------------- __global__ void _cuda_prelu_gradient(float* out, const float* s, const float* gi, size_t n, const float* pp, float* ppgrad) { const float p = *pp; float pgrad = 0; for(auto i : grid_stride_range(0, n)) { if (s[i] > 0) { out[i] += gi[i]; } else { out[i] += p*gi[i]; pgrad += gi[i]*s[i]; } } // Then do the warp reduce add thing to merge into one output value. warp_reduce_atomic_add(*ppgrad, pgrad); } void prelu_gradient ( tensor& grad, const tensor& src, const tensor& gradient_input, const tensor& param, tensor& params_grad ) { params_grad = 0; launch_kernel(_cuda_prelu_gradient, max_jobs(grad.size()), grad.device(), src.device(), gradient_input.device(), grad.size(), param.device(), params_grad.device()); } // ---------------------------------------------------------------------------------------- } }
baf0c85dd8f794de2dcc387981135ae9d8019cca.hip
// !!! This is a file automatically generated by hipify!!! /*----------------------------*/ /* ALGORITHMIC TOOLS CLASS */ /* - REDUCTIONS SUBCLASS - */ /* IMPLEMENTATION */ /*----------------------------*/ #include <iostream> #include "GlobalDeclarations.cuh" #include "Algorithms.cuh" #include "ReductionKernels.cuh" using namespace std; /********************************* * DECLARATIONS * * ____________ * *********************************/ extern __device__ datatype* experimental_array; /************************************* * MEMBER FUNCTIONS * * ________________ * *************************************/ /*--------------------------------------------*/ /* Reduction in 1D layout (i.e. vector ) */ /* of __M__A__X__I__M__U__M__ squared value. */ /*--------------------------------------------*/ __host__ bool Algorithms::Reduction::Maximum_squared( datatype* in, unsigned int colsD, datatype* usecount, datatype* replaced, unsigned int j) { // We again assume colsD <= 32 to utilize a single warp device_max << <1, 32 >> > (in, colsD, usecount, replaced, j); return true; } /*-------------------------------------------*/ /* Reduction in 1D layout (i.e. vector) of */ /* __M__A__X__I__M__U__M__ value of its */ /* elements as well as their indices. */ /*-------------------------------------------*/ __host__ bool Algorithms::Reduction::Maximum(datatype* in, int size, datatype* out) { dim3 block(1024); dim3 grid((size + block.x - 1) / block.x); device_max_err_STAGE1 << < grid, block >> > (in, size, out); int N; while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; device_max_err_STAGE2 << < grid, block >> > (in, N, out); } return true; } /*-----------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix columns) */ /* of __S__U__M__ of squared values of elements. */ /* Result is then square rooted and inverted. 
*/ /*-----------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_Sum_SQUAREDelements( datatype* in, int rows, int cols, datatype* out, unsigned int recommended) { dim3 block(recommended); dim3 grid(cols); deviceReduceKernel_2D_square_values << <grid, block >> > (in, rows, out); return true; } /*--------------------------------------*/ /* Reduction in 1D layout (i.e. vector) */ /* of __S__U__M__ of squared values of */ /* elements (more than 1024). */ /*--------------------------------------*/ __host__ bool Algorithms::Reduction::reduce1D_Batched_Sum_TOGETHER_collincomb( datatype* in, datatype* out, datatype* counters, int colsD, int colsX, datatype *A, datatype* columns, datatype* out2, unsigned int rowsX) { dim3 block(1024); dim3 grid((colsX + block.x - 1) / block.x, colsD); multi_reduction_squared_sum_STAGE1 << <grid, block >> > (in, out, counters, colsX); grid.x = colsD; grid.y = 2; multi_reduction_squared_sum_STAGE2_together_collincomb << <grid, block >> >( in, out, counters, colsX, A, columns, out2, rowsX); return true; } /*------------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix columns) */ /* of linear combination of input matrix and */ /* given columns (filtered by the specified rows) */ /*------------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_rowlincomb_plus_nrm2_plus_mul( datatype *A, datatype* x, datatype* out, datatype* cols, unsigned int rowsX, unsigned int colsX, datatype* counters, unsigned int recommended, datatype* out2, datatype* D, datatype* out3, unsigned int colsD) { rowlincomb << <colsX + colsD + 1, recommended >> > ( A, x, out, cols, rowsX, counters, out2, D, out3, colsX); return true; } /*-------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix rows) */ /* of dot products with given matrix */ /* (filtered by the specified cols). 
*/ /*-------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_dot_products_modified( datatype* Gamma, datatype* columns, datatype* gammaJ, datatype* out, datatype* counters, unsigned int pJ, unsigned int colsD, unsigned int colsX) { // We also assume that colsD <= 32 dim3 block(1024); dim3 grid((colsX + block.x - 1) / block.x, colsD); multi_reduction_smallGamma_times_gammaJ_STAGE1 << <grid, block >> > ( Gamma, columns, gammaJ, out, counters, pJ); multi_reduction_smallGamma_times_gammaJ_STAGE2 << <colsD, block >> > ( out, counters, pJ); return true; } /*-------------------------------*/ /* Euclidean norm of the vector. */ /*-------------------------------*/ __host__ bool Algorithms::Reduction::euclidean_norm( datatype* in, datatype* out, unsigned int length, unsigned int recommended) { // We assume length i.e. rowsX <= 1024 EUnorm << <1, recommended >> > (in, out, length); return true; } /*-----------------------------------------------*/ /* Compute the Round Mean Square Error between a */ /* matrix X and its approximation matrix X~ */ /*-----------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce_RMSE( datatype* X, datatype* Xappr, datatype* out, unsigned int rows, unsigned int cols, unsigned int iter, unsigned int recommended_threads, datatype* bitmap) { dim3 block(recommended_threads); dim3 grid(cols); RMSE_stage1 << <grid, block >> > (X, Xappr, rows, out); /********************/ block.x = 512; grid.x = (cols + block.x - 1) / block.x; RMSE_stage2 << < grid, block >> > (out, cols, rows*cols, iter, bitmap); /********************/ int N; while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; RMSE_stage3 << < grid, block >> > (N, rows*cols, iter); } return true; } /*------------------------------------------------*/ /* Compute the Round Mean Square Error between a */ /* matrix X and its approximation matrix X~ */ /* ( modified for use only in Dictionary Update's */ /* special case ) 
*/ /*------------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce_ERROR_for_special_case( datatype* X, datatype* Xappr, datatype* out, unsigned int rows, unsigned int cols, datatype* counters, datatype* unused, datatype* unsig_counter, unsigned int recommended_threads) { dim3 block(recommended_threads); dim3 grid(MIN(cols,MAX_SIGNALS)); SCase_err_stage1 << <grid, block >> > (X, Xappr, rows, cols, out, counters, unused, unsig_counter); /********************/ int N = grid.x; block.x = 1024; grid.x = (N + block.x - 1) / block.x; SCase_err_stage2 << < grid, block >> > (out, N, Xappr, cols, counters, unsig_counter); /********************/ while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; SCase_err_stage3 << < grid, block >> > (N, Xappr, counters); } return true; } /*-------------------------------*/ /* Euclidean norm of the vector. */ /* ONLY for the special case! */ /*-------------------------------*/ __host__ bool Algorithms::Reduction::SCase_EU_norm( datatype* in, datatype* out, unsigned int length, unsigned int recommended, datatype* counters, datatype* unused, unsigned int colsX) { // We assume length <= 1024 SCase_norm << <1, recommended >> > (in, out, length, counters, unused, MIN(colsX, MAX_SIGNALS)); return true; } /*--------------------------------------*/ /* Set the size of the reduction buffer */ /* to be used in the calculations. */ /*--------------------------------------*/ bool Algorithms::Reduction::setReductionBuffer(datatype* b) { // Implicit Synchronization hipError_t cet; if ((cet = hipMemcpyToSymbol( experimental_array, &b, sizeof(datatype*), 0, hipMemcpyHostToDevice) ) != hipSuccess) { cerr << "CudaMemcpyToSymbol (reduction buffer) failed: " << hipGetErrorString(cet) << endl; return false; } return true; }
baf0c85dd8f794de2dcc387981135ae9d8019cca.cu
/*----------------------------*/ /* ALGORITHMIC TOOLS CLASS */ /* - REDUCTIONS SUBCLASS - */ /* IMPLEMENTATION */ /*----------------------------*/ #include <iostream> #include "GlobalDeclarations.cuh" #include "Algorithms.cuh" #include "ReductionKernels.cuh" using namespace std; /********************************* * DECLARATIONS * * ____________ * *********************************/ extern __device__ datatype* experimental_array; /************************************* * MEMBER FUNCTIONS * * ________________ * *************************************/ /*--------------------------------------------*/ /* Reduction in 1D layout (i.e. vector ) */ /* of __M__A__X__I__M__U__M__ squared value. */ /*--------------------------------------------*/ __host__ bool Algorithms::Reduction::Maximum_squared( datatype* in, unsigned int colsD, datatype* usecount, datatype* replaced, unsigned int j) { // We again assume colsD <= 32 to utilize a single warp device_max << <1, 32 >> > (in, colsD, usecount, replaced, j); return true; } /*-------------------------------------------*/ /* Reduction in 1D layout (i.e. vector) of */ /* __M__A__X__I__M__U__M__ value of its */ /* elements as well as their indices. */ /*-------------------------------------------*/ __host__ bool Algorithms::Reduction::Maximum(datatype* in, int size, datatype* out) { dim3 block(1024); dim3 grid((size + block.x - 1) / block.x); device_max_err_STAGE1 << < grid, block >> > (in, size, out); int N; while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; device_max_err_STAGE2 << < grid, block >> > (in, N, out); } return true; } /*-----------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix columns) */ /* of __S__U__M__ of squared values of elements. */ /* Result is then square rooted and inverted. 
*/ /*-----------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_Sum_SQUAREDelements( datatype* in, int rows, int cols, datatype* out, unsigned int recommended) { dim3 block(recommended); dim3 grid(cols); deviceReduceKernel_2D_square_values << <grid, block >> > (in, rows, out); return true; } /*--------------------------------------*/ /* Reduction in 1D layout (i.e. vector) */ /* of __S__U__M__ of squared values of */ /* elements (more than 1024). */ /*--------------------------------------*/ __host__ bool Algorithms::Reduction::reduce1D_Batched_Sum_TOGETHER_collincomb( datatype* in, datatype* out, datatype* counters, int colsD, int colsX, datatype *A, datatype* columns, datatype* out2, unsigned int rowsX) { dim3 block(1024); dim3 grid((colsX + block.x - 1) / block.x, colsD); multi_reduction_squared_sum_STAGE1 << <grid, block >> > (in, out, counters, colsX); grid.x = colsD; grid.y = 2; multi_reduction_squared_sum_STAGE2_together_collincomb << <grid, block >> >( in, out, counters, colsX, A, columns, out2, rowsX); return true; } /*------------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix columns) */ /* of linear combination of input matrix and */ /* given columns (filtered by the specified rows) */ /*------------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_rowlincomb_plus_nrm2_plus_mul( datatype *A, datatype* x, datatype* out, datatype* cols, unsigned int rowsX, unsigned int colsX, datatype* counters, unsigned int recommended, datatype* out2, datatype* D, datatype* out3, unsigned int colsD) { rowlincomb << <colsX + colsD + 1, recommended >> > ( A, x, out, cols, rowsX, counters, out2, D, out3, colsX); return true; } /*-------------------------------------------*/ /* Reduction in 2D layout (i.e. matrix rows) */ /* of dot products with given matrix */ /* (filtered by the specified cols). 
*/ /*-------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce2D_dot_products_modified( datatype* Gamma, datatype* columns, datatype* gammaJ, datatype* out, datatype* counters, unsigned int pJ, unsigned int colsD, unsigned int colsX) { // We also assume that colsD <= 32 dim3 block(1024); dim3 grid((colsX + block.x - 1) / block.x, colsD); multi_reduction_smallGamma_times_gammaJ_STAGE1 << <grid, block >> > ( Gamma, columns, gammaJ, out, counters, pJ); multi_reduction_smallGamma_times_gammaJ_STAGE2 << <colsD, block >> > ( out, counters, pJ); return true; } /*-------------------------------*/ /* Euclidean norm of the vector. */ /*-------------------------------*/ __host__ bool Algorithms::Reduction::euclidean_norm( datatype* in, datatype* out, unsigned int length, unsigned int recommended) { // We assume length i.e. rowsX <= 1024 EUnorm << <1, recommended >> > (in, out, length); return true; } /*-----------------------------------------------*/ /* Compute the Round Mean Square Error between a */ /* matrix X and its approximation matrix X~ */ /*-----------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce_RMSE( datatype* X, datatype* Xappr, datatype* out, unsigned int rows, unsigned int cols, unsigned int iter, unsigned int recommended_threads, datatype* bitmap) { dim3 block(recommended_threads); dim3 grid(cols); RMSE_stage1 << <grid, block >> > (X, Xappr, rows, out); /********************/ block.x = 512; grid.x = (cols + block.x - 1) / block.x; RMSE_stage2 << < grid, block >> > (out, cols, rows*cols, iter, bitmap); /********************/ int N; while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; RMSE_stage3 << < grid, block >> > (N, rows*cols, iter); } return true; } /*------------------------------------------------*/ /* Compute the Round Mean Square Error between a */ /* matrix X and its approximation matrix X~ */ /* ( modified for use only in Dictionary Update's */ /* special case ) 
*/ /*------------------------------------------------*/ __host__ bool Algorithms::Reduction::reduce_ERROR_for_special_case( datatype* X, datatype* Xappr, datatype* out, unsigned int rows, unsigned int cols, datatype* counters, datatype* unused, datatype* unsig_counter, unsigned int recommended_threads) { dim3 block(recommended_threads); dim3 grid(MIN(cols,MAX_SIGNALS)); SCase_err_stage1 << <grid, block >> > (X, Xappr, rows, cols, out, counters, unused, unsig_counter); /********************/ int N = grid.x; block.x = 1024; grid.x = (N + block.x - 1) / block.x; SCase_err_stage2 << < grid, block >> > (out, N, Xappr, cols, counters, unsig_counter); /********************/ while (grid.x > 1) { N = grid.x; grid.x = (N + block.x - 1) / block.x; SCase_err_stage3 << < grid, block >> > (N, Xappr, counters); } return true; } /*-------------------------------*/ /* Euclidean norm of the vector. */ /* ONLY for the special case! */ /*-------------------------------*/ __host__ bool Algorithms::Reduction::SCase_EU_norm( datatype* in, datatype* out, unsigned int length, unsigned int recommended, datatype* counters, datatype* unused, unsigned int colsX) { // We assume length <= 1024 SCase_norm << <1, recommended >> > (in, out, length, counters, unused, MIN(colsX, MAX_SIGNALS)); return true; } /*--------------------------------------*/ /* Set the size of the reduction buffer */ /* to be used in the calculations. */ /*--------------------------------------*/ bool Algorithms::Reduction::setReductionBuffer(datatype* b) { // Implicit Synchronization cudaError_t cet; if ((cet = cudaMemcpyToSymbol( experimental_array, &b, sizeof(datatype*), 0, cudaMemcpyHostToDevice) ) != cudaSuccess) { cerr << "CudaMemcpyToSymbol (reduction buffer) failed: " << cudaGetErrorString(cet) << endl; return false; } return true; }
0f23d70970c485c71027a6c08e4bf85e272f09e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Histogram Equalization #include <wb.h> #define HISTOGRAM_LENGTH 256 #define BLOCK_SIZE 128 //@@ insert code here __device__ unsigned char clamp(unsigned char x, unsigned char start, unsigned char end) { return min(max(x, start), end); } __host__ __device__ float prob(int x, int width, int height) { return 1.0*x/(width*height); } __global__ void convertFloatToChar(float *in, unsigned char *out, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { out[index] = (unsigned char) (in[index] * 255); } } __global__ void RGBToGrayScale(unsigned char *rgb, unsigned char *gray, int size, int width, int height, int channels) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int index = row*width + col; if (row < height && col < width) { unsigned char r = rgb[index*channels+0]; unsigned char g = rgb[index*channels+1]; unsigned char b = rgb[index*channels+2]; gray[index] = (unsigned char) (0.21*r + 0.71*g + 0.07*b); } } __global__ void histogram(unsigned char *buffer, unsigned int *histo, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int histo_private[256]; if (threadIdx.x < 256) { histo_private[threadIdx.x] = 0; } __syncthreads(); // Stride is total number of threads int stride = blockDim.x * gridDim.x; while (i < size) { atomicAdd(&(histo_private[buffer[i]]), 1); i += stride; } __syncthreads(); if (threadIdx.x < 256) { atomicAdd(&histo[threadIdx.x], histo_private[threadIdx.x]); } } __global__ void scan(unsigned int * input, float * output, int len, int width, int height) { unsigned int start = 2 * blockIdx.x * blockDim.x; unsigned int t = threadIdx.x; int i = start + t; __shared__ float XY[2*BLOCK_SIZE]; if (start + t < len) { XY[t] = prob(input[start + t], width, height); } else { XY[t] = 0.0f; } if (start + blockDim.x + t < len) { XY[t + blockDim.x] = prob(input[start + 
blockDim.x + t], width, height); } else { XY[t + blockDim.x] = 0.0f; } __syncthreads(); for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) { int index = (threadIdx.x+1)*stride*2 - 1; if (index < 2*BLOCK_SIZE) { XY[index] += XY[index-stride]; } __syncthreads(); } for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 - 1; if (index + stride < 2*BLOCK_SIZE) { XY[index+stride] += XY[index]; } } __syncthreads(); if (i < len) { output[i] = XY[t]; output[start+blockDim.x+t] = XY[t+blockDim.x]; } } __global__ void reduceMin(float *input, float *output, int len) { unsigned int start = 2 * blockIdx.x * blockDim.x; unsigned int t = threadIdx.x; __shared__ float partialSum[2*BLOCK_SIZE]; if (start + t < len) { partialSum[t] = input[start+t]; } else { partialSum[t] = 0.0f; } if (start + blockDim.x + t < len) { partialSum[blockDim.x + t] = input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0f; } for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialSum[t] = min(partialSum[t], partialSum[t+stride]); } } __syncthreads(); if (t == 0) { output[blockIdx.x] = partialSum[t]; } } __global__ void correct_color(unsigned char *uCharImage, float *cdf, float cdfmin, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { unsigned char val = uCharImage[index]; uCharImage[index] = clamp(255*(cdf[val] - cdfmin)/(1 - cdfmin), 0, 255); // uCharImage[index] = min(max(255*(cdf[uCharImage[index]] - cdfmin)/(1 - cdfmin),0.0),255.0); } } __global__ void convertCharToFloat(unsigned char* input, float *output, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { output[index] = (float) (input[index]/255.0); } } /** computes the history equalization and converts image back to float **/ __global__ void hist_eq(unsigned char * deviceCharImg, float * output, float* cdf, float cdfmin, int size) { int index = 
threadIdx.x + blockDim.x * blockIdx.x; if(index < size) { deviceCharImg[index] = min(max(255*(cdf[deviceCharImg[index]] - cdfmin)/(1 - cdfmin),0.0),255.0); output[index] = (float) (deviceCharImg[index]/255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; //@@ Insert more code here float *deviceInputImageData; float *deviceOutputImageData; unsigned char *deviceCastImageData; unsigned char *deviceGrayScaleData; unsigned int *deviceHistogram; float *deviceHistoScan; unsigned int *hostHistogram; float *hostHistoScan; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); wbTime_stop(Generic, "Importing data and creating memory on host"); //@@ insert code here hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); hipMalloc((void**)&deviceInputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels); hipMalloc((void**)&deviceOutputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels); hipMalloc((void**)&deviceCastImageData, sizeof(unsigned char) * imageWidth * imageHeight * imageChannels); hipMalloc((void**)&deviceGrayScaleData, sizeof(unsigned char) * imageWidth * imageHeight); hipMalloc((void**)&deviceHistogram, sizeof(unsigned int) * HISTOGRAM_LENGTH); hipMalloc((void**)&deviceHistoScan, sizeof(float) * HISTOGRAM_LENGTH); hipMemcpy(deviceInputImageData, hostInputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels, 
hipMemcpyHostToDevice); hipMemset(deviceHistogram, 0, sizeof(unsigned int) * HISTOGRAM_LENGTH); hipMemset(deviceHistoScan, 0.0f, sizeof(float) * HISTOGRAM_LENGTH); int imageDataSize = imageWidth * imageHeight * imageChannels; int imageSize = imageWidth * imageHeight; hipLaunchKernelGGL(( convertFloatToChar), dim3((imageDataSize-1)/1024+1), dim3(1024), 0, 0, deviceInputImageData, deviceCastImageData, imageDataSize); dim3 dimBlock(12, 12, 1); dim3 dimGrid((imageWidth - 1)/12 + 1, (imageHeight - 1)/12 + 1, 1); hipLaunchKernelGGL(( RGBToGrayScale), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceCastImageData, deviceGrayScaleData, imageSize, imageWidth, imageHeight, imageChannels); hipLaunchKernelGGL(( histogram), dim3((imageSize-1)/1024+1), dim3(1024), 0, 0, deviceGrayScaleData, deviceHistogram, imageSize); hostHistogram = (unsigned int*) malloc(sizeof(unsigned int) * HISTOGRAM_LENGTH); memset(hostHistogram, 0, sizeof(unsigned int) * HISTOGRAM_LENGTH); hipMemcpy(hostHistogram, deviceHistogram, sizeof(unsigned int) * HISTOGRAM_LENGTH, hipMemcpyDeviceToHost); hostHistoScan = (float*) malloc(sizeof(float) * HISTOGRAM_LENGTH); memset(hostHistoScan, 0.0f, sizeof(float) * HISTOGRAM_LENGTH); /* hostHistoScan[0] = prob(hostHistogram[0], imageWidth, imageHeight); for (int i = 1; i < 256; i++) { hostHistoScan[i] = hostHistoScan[i-1] + prob(hostHistogram[i], imageWidth, imageHeight); } */ hipLaunchKernelGGL(( scan), dim3(((HISTOGRAM_LENGTH/2)-1)/128+1), dim3(128), 0, 0, deviceHistogram, deviceHistoScan, HISTOGRAM_LENGTH, imageWidth, imageHeight); // hipMemcpy(hostHistoScan, deviceHistoScan, sizeof(float) * HISTOGRAM_LENGTH, hipMemcpyDeviceToHost); /* float cdfmin = hostHistoScan[0]; for (int i = 1; i < 256; i++) { cdfmin = min(cdfmin, hostHistoScan[i]); }*/ float *deviceMin; hipMalloc((void**)&deviceMin, sizeof(float)*2); hipLaunchKernelGGL(( reduceMin), dim3(((HISTOGRAM_LENGTH/2)-1)/128+1), dim3(128), 0, 0, deviceHistoScan, deviceMin, HISTOGRAM_LENGTH); float *hostMin; hostMin = 
(float*)malloc(sizeof(float) * 2); hipMemcpy(hostMin, deviceMin, sizeof(float) * 2, hipMemcpyDeviceToHost); float cdfmin = hostMin[0]; // hipMemcpy(deviceHistoScan, hostHistoScan, sizeof(float) * HISTOGRAM_LENGTH, hipMemcpyHostToDevice); hipLaunchKernelGGL(( correct_color), dim3((imageDataSize-1)/1024+1), dim3(1024), 0, 0, deviceCastImageData, deviceHistoScan, cdfmin, imageDataSize); hipLaunchKernelGGL(( convertCharToFloat), dim3((imageDataSize-1)/1024+1), dim3(1024), 0, 0, deviceCastImageData, deviceOutputImageData, imageDataSize); // hist_eq<<<(imageDataSize-1)/1024+1, 1024>>>(deviceCastImageData, deviceOutputImageData, deviceHistoScan, cdfmin, imageDataSize); hipMemcpy(hostOutputImageData, deviceOutputImageData, sizeof(float) * imageDataSize, hipMemcpyDeviceToHost); wbSolution(args, outputImage); //@@ insert code here hipFree(deviceInputImageData); hipFree(deviceOutputImageData); hipFree(deviceCastImageData); hipFree(deviceGrayScaleData); hipFree(deviceHistogram); hipFree(deviceHistoScan); free(hostHistogram); free(hostHistoScan); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
0f23d70970c485c71027a6c08e4bf85e272f09e0.cu
// Histogram Equalization #include <wb.h> #define HISTOGRAM_LENGTH 256 #define BLOCK_SIZE 128 //@@ insert code here __device__ unsigned char clamp(unsigned char x, unsigned char start, unsigned char end) { return min(max(x, start), end); } __host__ __device__ float prob(int x, int width, int height) { return 1.0*x/(width*height); } __global__ void convertFloatToChar(float *in, unsigned char *out, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { out[index] = (unsigned char) (in[index] * 255); } } __global__ void RGBToGrayScale(unsigned char *rgb, unsigned char *gray, int size, int width, int height, int channels) { int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; int index = row*width + col; if (row < height && col < width) { unsigned char r = rgb[index*channels+0]; unsigned char g = rgb[index*channels+1]; unsigned char b = rgb[index*channels+2]; gray[index] = (unsigned char) (0.21*r + 0.71*g + 0.07*b); } } __global__ void histogram(unsigned char *buffer, unsigned int *histo, int size) { int i = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int histo_private[256]; if (threadIdx.x < 256) { histo_private[threadIdx.x] = 0; } __syncthreads(); // Stride is total number of threads int stride = blockDim.x * gridDim.x; while (i < size) { atomicAdd(&(histo_private[buffer[i]]), 1); i += stride; } __syncthreads(); if (threadIdx.x < 256) { atomicAdd(&histo[threadIdx.x], histo_private[threadIdx.x]); } } __global__ void scan(unsigned int * input, float * output, int len, int width, int height) { unsigned int start = 2 * blockIdx.x * blockDim.x; unsigned int t = threadIdx.x; int i = start + t; __shared__ float XY[2*BLOCK_SIZE]; if (start + t < len) { XY[t] = prob(input[start + t], width, height); } else { XY[t] = 0.0f; } if (start + blockDim.x + t < len) { XY[t + blockDim.x] = prob(input[start + blockDim.x + t], width, height); } else { XY[t + blockDim.x] = 0.0f; } __syncthreads(); for 
(int stride = 1; stride <= BLOCK_SIZE; stride *= 2) { int index = (threadIdx.x+1)*stride*2 - 1; if (index < 2*BLOCK_SIZE) { XY[index] += XY[index-stride]; } __syncthreads(); } for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 - 1; if (index + stride < 2*BLOCK_SIZE) { XY[index+stride] += XY[index]; } } __syncthreads(); if (i < len) { output[i] = XY[t]; output[start+blockDim.x+t] = XY[t+blockDim.x]; } } __global__ void reduceMin(float *input, float *output, int len) { unsigned int start = 2 * blockIdx.x * blockDim.x; unsigned int t = threadIdx.x; __shared__ float partialSum[2*BLOCK_SIZE]; if (start + t < len) { partialSum[t] = input[start+t]; } else { partialSum[t] = 0.0f; } if (start + blockDim.x + t < len) { partialSum[blockDim.x + t] = input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0f; } for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) { partialSum[t] = min(partialSum[t], partialSum[t+stride]); } } __syncthreads(); if (t == 0) { output[blockIdx.x] = partialSum[t]; } } __global__ void correct_color(unsigned char *uCharImage, float *cdf, float cdfmin, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { unsigned char val = uCharImage[index]; uCharImage[index] = clamp(255*(cdf[val] - cdfmin)/(1 - cdfmin), 0, 255); // uCharImage[index] = min(max(255*(cdf[uCharImage[index]] - cdfmin)/(1 - cdfmin),0.0),255.0); } } __global__ void convertCharToFloat(unsigned char* input, float *output, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { output[index] = (float) (input[index]/255.0); } } /** computes the history equalization and converts image back to float **/ __global__ void hist_eq(unsigned char * deviceCharImg, float * output, float* cdf, float cdfmin, int size) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < size) { deviceCharImg[index] = 
min(max(255*(cdf[deviceCharImg[index]] - cdfmin)/(1 - cdfmin),0.0),255.0); output[index] = (float) (deviceCharImg[index]/255.0); } } int main(int argc, char ** argv) { wbArg_t args; int imageWidth; int imageHeight; int imageChannels; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; const char * inputImageFile; //@@ Insert more code here float *deviceInputImageData; float *deviceOutputImageData; unsigned char *deviceCastImageData; unsigned char *deviceGrayScaleData; unsigned int *deviceHistogram; float *deviceHistoScan; unsigned int *hostHistogram; float *hostHistoScan; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); wbTime_start(Generic, "Importing data and creating memory on host"); inputImage = wbImport(inputImageFile); imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); wbTime_stop(Generic, "Importing data and creating memory on host"); //@@ insert code here hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); cudaMalloc((void**)&deviceInputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels); cudaMalloc((void**)&deviceOutputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels); cudaMalloc((void**)&deviceCastImageData, sizeof(unsigned char) * imageWidth * imageHeight * imageChannels); cudaMalloc((void**)&deviceGrayScaleData, sizeof(unsigned char) * imageWidth * imageHeight); cudaMalloc((void**)&deviceHistogram, sizeof(unsigned int) * HISTOGRAM_LENGTH); cudaMalloc((void**)&deviceHistoScan, sizeof(float) * HISTOGRAM_LENGTH); cudaMemcpy(deviceInputImageData, hostInputImageData, sizeof(float) * imageWidth * imageHeight * imageChannels, cudaMemcpyHostToDevice); cudaMemset(deviceHistogram, 0, sizeof(unsigned int) * 
HISTOGRAM_LENGTH); cudaMemset(deviceHistoScan, 0.0f, sizeof(float) * HISTOGRAM_LENGTH); int imageDataSize = imageWidth * imageHeight * imageChannels; int imageSize = imageWidth * imageHeight; convertFloatToChar<<<(imageDataSize-1)/1024+1, 1024>>>(deviceInputImageData, deviceCastImageData, imageDataSize); dim3 dimBlock(12, 12, 1); dim3 dimGrid((imageWidth - 1)/12 + 1, (imageHeight - 1)/12 + 1, 1); RGBToGrayScale<<<dimGrid, dimBlock>>>(deviceCastImageData, deviceGrayScaleData, imageSize, imageWidth, imageHeight, imageChannels); histogram<<<(imageSize-1)/1024+1, 1024>>>(deviceGrayScaleData, deviceHistogram, imageSize); hostHistogram = (unsigned int*) malloc(sizeof(unsigned int) * HISTOGRAM_LENGTH); memset(hostHistogram, 0, sizeof(unsigned int) * HISTOGRAM_LENGTH); cudaMemcpy(hostHistogram, deviceHistogram, sizeof(unsigned int) * HISTOGRAM_LENGTH, cudaMemcpyDeviceToHost); hostHistoScan = (float*) malloc(sizeof(float) * HISTOGRAM_LENGTH); memset(hostHistoScan, 0.0f, sizeof(float) * HISTOGRAM_LENGTH); /* hostHistoScan[0] = prob(hostHistogram[0], imageWidth, imageHeight); for (int i = 1; i < 256; i++) { hostHistoScan[i] = hostHistoScan[i-1] + prob(hostHistogram[i], imageWidth, imageHeight); } */ scan<<<((HISTOGRAM_LENGTH/2)-1)/128+1, 128>>>(deviceHistogram, deviceHistoScan, HISTOGRAM_LENGTH, imageWidth, imageHeight); // cudaMemcpy(hostHistoScan, deviceHistoScan, sizeof(float) * HISTOGRAM_LENGTH, cudaMemcpyDeviceToHost); /* float cdfmin = hostHistoScan[0]; for (int i = 1; i < 256; i++) { cdfmin = min(cdfmin, hostHistoScan[i]); }*/ float *deviceMin; cudaMalloc((void**)&deviceMin, sizeof(float)*2); reduceMin<<<((HISTOGRAM_LENGTH/2)-1)/128+1, 128>>>(deviceHistoScan, deviceMin, HISTOGRAM_LENGTH); float *hostMin; hostMin = (float*)malloc(sizeof(float) * 2); cudaMemcpy(hostMin, deviceMin, sizeof(float) * 2, cudaMemcpyDeviceToHost); float cdfmin = hostMin[0]; // cudaMemcpy(deviceHistoScan, hostHistoScan, sizeof(float) * HISTOGRAM_LENGTH, cudaMemcpyHostToDevice); 
correct_color<<<(imageDataSize-1)/1024+1, 1024>>>(deviceCastImageData, deviceHistoScan, cdfmin, imageDataSize); convertCharToFloat<<<(imageDataSize-1)/1024+1, 1024>>>(deviceCastImageData, deviceOutputImageData, imageDataSize); // hist_eq<<<(imageDataSize-1)/1024+1, 1024>>>(deviceCastImageData, deviceOutputImageData, deviceHistoScan, cdfmin, imageDataSize); cudaMemcpy(hostOutputImageData, deviceOutputImageData, sizeof(float) * imageDataSize, cudaMemcpyDeviceToHost); wbSolution(args, outputImage); //@@ insert code here cudaFree(deviceInputImageData); cudaFree(deviceOutputImageData); cudaFree(deviceCastImageData); cudaFree(deviceGrayScaleData); cudaFree(deviceHistogram); cudaFree(deviceHistoScan); free(hostHistogram); free(hostHistoScan); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
7418e9763ece42ae348f7718d42658914b973f20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <random> #include <algorithm> #include <vector> #include "moments.h" #definehipLaunchKernelGGL(( GALL) , dim3(dG_all), dim3(dB_all) , 0, 0, MomentsG::MomentsGParameters* pars, Grids* grids) : grids_(grids), pars_(pars) { G_lm = nullptr; dens_ptr = nullptr; upar_ptr = nullptr; tpar_ptr = nullptr; tprp_ptr = nullptr; qpar_ptr = nullptr; qprp_ptr = nullptr; size_t lhsize = grids_->size_G; // printf("nspecies = %d and size_G = %d \n",grids_->Nspecies, (int) grids_->size_G); checkCuda(hipMalloc((void**) &G_lm, lhsize)); hipMemset(G_lm, 0., lhsize); float * vts_h; hipHostMalloc( &vts_h, sizeof(float) * grids_->Nspecies ); float * tzs_h; hipHostMalloc( &tzs_h, sizeof(float) * grids_->Nspecies ); float * zts_h; hipHostMalloc( &zts_h, sizeof(float) * grids_->Nspecies ); float * nts_h; hipHostMalloc( &nts_h, sizeof(float) * grids_->Nspecies ); float * nzs_h; hipHostMalloc( &nzs_h, sizeof(float) * grids_->Nspecies ); float * r2s_h; hipHostMalloc( &r2s_h, sizeof(float) * grids_->Nspecies ); float * tps_h; hipHostMalloc( &tps_h, sizeof(float) * grids_->Nspecies ); float * fps_h; hipHostMalloc( &fps_h, sizeof(float) * grids_->Nspecies ); float * ups_h; hipHostMalloc( &ups_h, sizeof(float) * grids_->Nspecies ); float * aps_h; hipHostMalloc( &aps_h, sizeof(float) * grids_->Nspecies ); float * qns_h; hipHostMalloc( &qns_h, sizeof(float) * grids_->Nspecies ); float * nu_ss_h; hipHostMalloc( &nu_ss_h, sizeof(float) * grids_->Nspecies ); int * typ_h; hipHostMalloc( &typ_h, sizeof(int) * grids_->Nspecies ); for (int is=0; is<grids_->Nspecies; is++) { vts_h[is] = pars_->species_h[is].vt; tzs_h[is] = pars_->species_h[is].tz; zts_h[is] = pars_->species_h[is].zt; nts_h[is] = pars_->species_h[is].nt; nzs_h[is] = pars_->species_h[is].nz; r2s_h[is] = pars_->species_h[is].rho2; tps_h[is] = pars_->species_h[is].tprim; fps_h[is] = pars_->species_h[is].fprim; ups_h[is] = pars_->species_h[is].uprim; 
aps_h[is] = pars_->species_h[is].as; qns_h[is] = pars_->species_h[is].qneut; nu_ss_h[is] = pars_->species_h[is].nu_ss; typ_h[is] = pars_->species_h[is].type; } checkCuda(hipMalloc( &vts, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &zts, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &tzs, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &nts, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &nzs, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &aps, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &r2s, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &qns, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &tps, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &fps, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &ups, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &nu_ss, sizeof(float) * grids_->Nspecies ) ); checkCuda(hipMalloc( &typ, sizeof(int) * grids_->Nspecies ) ); CP_TO_GPU(vts, vts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(tzs, tzs_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(zts, zts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nts, nts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nzs, nzs_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(r2s, r2s_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(tps, tps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(fps, fps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(ups, ups_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(aps, aps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(qns, qns_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nu_ss, nu_ss_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(typ, typ_h, sizeof(int) *grids_->Nspecies); hipHostFree(vts_h); hipHostFree(tzs_h); hipHostFree(zts_h); hipHostFree(nts_h); hipHostFree(nzs_h); hipHostFree(r2s_h); hipHostFree(tps_h); hipHostFree(fps_h); hipHostFree(ups_h); hipHostFree(aps_h); hipHostFree(qns_h); hipHostFree(nu_ss_h); hipHostFree(typ_h); dens_ptr = 
(hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); upar_ptr = (hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); tpar_ptr = (hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); tprp_ptr = (hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); qpar_ptr = (hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); qprp_ptr = (hipComplex**) malloc(sizeof(hipComplex*) * grids_->Nspecies); printf("Allocated a G_lm array of size %.2f MB\n", lhsize/1024./1024.); int Nm = grids_->Nm; int Nl = grids_->Nl; for(int s=0; s<grids->Nspecies; s++) { // set up pointers for named moments that point to parts of G_lm int l,m; l = 0, m = 0; // density if(l<Nl && m<Nm) dens_ptr[s] = G(l,m,s); l = 0, m = 1; // u_parallel if(l<Nl && m<Nm) upar_ptr[s] = G(l,m,s); l = 0, m = 2; // T_parallel / sqrt(2) if(l<Nl && m<Nm) tpar_ptr[s] = G(l,m,s); l = 0, m = 3; // q_parallel / sqrt(6) if(l<Nl && m<Nm) qpar_ptr[s] = G(l,m,s); l = 1, m = 0; // T_perp if(l<Nl && m<Nm) tprp_ptr[s] = G(l,m,s); l = 1, m = 1; // q_perp if(l<Nl && m<Nm) qprp_ptr[s] = G(l,m,s); } int nn1, nn2, nn3, nt1, nt2, nt3, nb1, nb2, nb3; if (pars_->ks) { printf("initializing Kuramoto-Sivashinsky\n"); nn1 = grids_->Nyc; nt1 = min(nn1, 128); nb1 = 1 + (nn1-1)/nt1; nn2 = 1; nt2 = min(nn2, 1); nb2 = 1 + (nn2-1)/nt2; nn3 = 1; nt3 = min(nn3, 1); nb3 = 1 + (nn3-1)/nt3; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); dimBlock = dim3(nt1, nt2, nt3); dimGrid = dim3(nb1, nb2, nb3); return; } if (pars_->vp) { printf("initializing Vlasov-Poisson\n"); nn1 = grids_->Nyc; nt1 = min(nn1, 128); nb1 = 1 + (nn1-1)/nt1; nn2 = 1; nt2 = min(nn2, 1); nb2 = 1 + (nn2-1)/nt2; nn3 = 1; nt3 = min(nn3, 1); nb3 = 1 + (nn3-1)/nt3; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); return; } // nn1 = grids_->NxNycNz; nt1 = min(32, nn1); nb1 = 1 + (nn1-1)/nt1; // nn2 = 1; nt2 = min( 4, Nl); nb2 = 1 + (nn2-1)/nt2; // nn3 = 1; nt3 = min( 4, Nm); nb3 = 1 + (nn3-1)/nt3; // dimBlock = dim3(nt1, nt2, 
nt3); // dimGrid = dim3(nb1, nb2, nb3); // dimBlock = dim3(32, min(4, Nl), min(4, Nm)); // dimGrid = dim3((grids_->NxNycNz-1)/dimBlock.x+1, 1, 1); nn1 = grids_->Nyc*grids_->Nx; nt1 = min(nn1, 32); nb1 = (nn1-1)/nt1 + 1; nn2 = grids_->Nz; nt2 = min(nn2, 32); nb2 = (nn2-1)/nt2 + 1; nn3 = grids_->Nspecies*grids_->Nm*grids_->Nl; nt3 = min(nn3, 1); nb3 = (nn3-1)/nt3 + 1; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); } MomentsG::~MomentsG() { free (dens_ptr); free (upar_ptr); free (tpar_ptr); free (qpar_ptr); free (qprp_ptr); if ( G_lm ) hipFree ( G_lm ); } void MomentsG::set_zero(void) { hipMemset(G_lm, 0., grids_->size_G); } void MomentsG::initVP(double *time) { hipComplex *init_h = nullptr; hipHostMalloc((void**) &init_h, sizeof(hipComplex)*grids_->Nyc*grids_->Nm); for (int ig = 0; ig<grids_->Nyc*grids_->Nm; ig++) { init_h[ig].x = 0.; init_h[ig].y = 0.; } // start with something simple: if (!pars_->restart) init_h[0].x = 1.; // This is the Maxwellian background init_h[1 + grids_->Nyc * 2].x = pars_->init_amp; // This is a temperature perturbation (up to a factor of sqrt(2)). 
CP_TO_GPU(G_lm, init_h, sizeof(hipComplex)*grids_->Nyc*grids_->Nm); hipHostFree(init_h); if (pars_->restart) this->restart_read(time); hipDeviceSynchronize(); } void MomentsG::initialConditions(double *time) { size_t momsize = sizeof(hipComplex)*grids_->NxNycNz; hipComplex *init_h = nullptr; hipHostMalloc((void**) &init_h, momsize); std::random_device rd; std::mt19937 gen(rd()); std::normal_distribution<float> ramp(0., pars_->init_amp); for (int idy = 0; idy<grids_->Nyc; idy++) { init_h[idy].x = 0.; init_h[idy].y = 0.; } for (int idy = 1; idy<grids_->Naky; idy++) { init_h[idy].x = ramp(gen); init_h[idy].y = ramp(gen); } // init_h[1].x = 0.5; // init_h[2].y = -0.25; CP_TO_GPU(G_lm, init_h, momsize); hipHostFree(init_h); // restart_read goes here, if restart == T // as in gs2, if restart_read is true, we want to *add* the restart values to anything // that has happened above and also move the value of time up to the end of the previous run if(pars_->restart) { DEBUG_PRINT("reading restart file \n"); this->restart_read(time); } hipDeviceSynchronize(); // checkCuda(hipGetLastError()); // return hipGetLastError(); } void MomentsG::initialConditions(float* z_h, double* time) { checkCuda(hipGetLastError()); hipDeviceSynchronize(); // to make sure its safe to operate on host memory size_t momsize = sizeof(hipComplex)*grids_->NxNycNz; hipComplex *init_h = nullptr; hipHostMalloc(&init_h, momsize); checkCuda(hipGetLastError()); for (int idx=0; idx<grids_->NxNycNz; idx++) { init_h[idx].x = 0.; init_h[idx].y = 0.; } if (pars_->ks) { init_h[1].x = 0.5; init_h[2].y = -0.25; } else { if(pars_->init_single) { //initialize single mode int iky = pars_->iky_single; int ikx = pars_->ikx_single; int NKX = 1; if (iky == 0 && ikx<1+(grids_->Nx-1)/3) NKX = 2; // reality condition for tertiary tests for (int j = 0; j<NKX; j++) { if (j==1) ikx = grids_->Nx-ikx; DEBUG_PRINT("ikx, iky: %d \t %d \n",ikx, iky); // float fac; // if(pars_->nlpm_test && iky==0) fac = .5; // else fac = 1.; // 
DEBUG_PRINT("fac = %f \n",fac); for(int iz=0; iz<grids_->Nz; iz++) { int index = iky + grids_->Nyc*ikx + grids_->NxNyc*iz; init_h[index].x = pars_->init_amp; //*fac; init_h[index].y = 0.; //init_amp; } } } else { srand(22); float samp; int idx; // printf("Hacking the initial condition! \n"); for(int i=0; i < 1 + (grids_->Nx - 1)/3; i++) { for(int j=1; j < 1 + (grids_->Ny - 1)/3; j++) { samp = pars_->init_amp; float ra = (float) (samp * (rand()-RAND_MAX/2) / RAND_MAX); float rb = (float) (samp * (rand()-RAND_MAX/2) / RAND_MAX); for (int js=0; js < 2; js++) { if (i==0) { idx = i; } else { idx = (js==0) ? i : grids_->Nx-i; } for(int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); if (js == 0) { init_h[index].x = ra; init_h[index].y = rb; } else { init_h[index].x = rb; init_h[index].y = ra; } if (pars_->kpar_init < 0.) { init_h[index].x *= (cos( -pars_->kpar_init *z_h[k]/pars_->Zp) + cos((-pars_->kpar_init+1.)*z_h[k]/pars_->Zp)); init_h[index].y *= (cos( -pars_->kpar_init *z_h[k]/pars_->Zp) + cos((-pars_->kpar_init+1.)*z_h[k]/pars_->Zp)); } else { init_h[index].x *= cos(pars_->kpar_init*z_h[k]/pars_->Zp); init_h[index].y *= cos(pars_->kpar_init*z_h[k]/pars_->Zp); } // printf("init_h[%d] = (%e, %e) \n",index,init_h[index].x,init_h[index].y); } } if (pars_->random_init) { for (int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); init_h[index].x = 0.; init_h[index].y = 0.; } for (int jj=1; jj<1+(grids_->Nz-1)/3; jj++) { float ka = (float) (samp * rand() / RAND_MAX); float pa = (float) (M_PI * (rand()-RAND_MAX/2) / RAND_MAX); float kb = (float) (samp * rand() / RAND_MAX); float pb = (float) (M_PI * (rand()-RAND_MAX/2) / RAND_MAX); for (int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); init_h[index].x += ka*sin((float) jj*z_h[k] + pa); init_h[index].y += kb*sin((float) jj*z_h[k] + pb); } } } } } } } // copy initial condition into device memory for (int is=0; is<grids_->Nspecies; is++) { 
switch (pars_->initf) { case inits::density : CP_TO_GPU(dens_ptr[is], init_h, momsize); break; case inits::upar : CP_TO_GPU(upar_ptr[is], init_h, momsize); break; case inits::tpar : CP_TO_GPU(tpar_ptr[is], init_h, momsize); break; case inits::tperp : CP_TO_GPU(tprp_ptr[is], init_h, momsize); break; case inits::qpar : CP_TO_GPU(qpar_ptr[is], init_h, momsize); break; case inits::qperp : CP_TO_GPU(qprp_ptr[is], init_h, momsize); break; } checkCuda(hipGetLastError()); } hipHostFree(init_h); // restart_read goes here, if restart == T // as in gs2, if restart_read is true, we want to *add* the restart values to anything // that has happened above and also move the value of time up to the end of the previous run if(pars_->restart) { DEBUG_PRINT("reading restart file \n"); this->restart_read(time); } hipDeviceSynchronize(); checkCuda(hipGetLastError()); DEBUG_PRINT("initial conditions set \n"); } void MomentsG::scale(double scalar) {scale_kernel GALL (G_lm, scalar);} void MomentsG::scale(hipComplex scalar) {scale_kernel GALL (G_lm, scalar);} void MomentsG::mask(void) {maskG GALL (this->G_lm);} void MomentsG::getH(hipComplex* J0phi) {Hkernel GALL (G_lm, J0phi);} void MomentsG::getG(hipComplex* J0phi) {Gkernel GALL (G_lm, J0phi);} void MomentsG::rescale(float * phi_max) { rescale_kernel GALL (G_lm, phi_max, grids_->Nspecies*grids_->Nm*grids_->Nl); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3, double c4, MomentsG* G4) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, c4, G4->G_lm, 
neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3, double c4, MomentsG* G4, double c5, MomentsG* G5) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, c4, G4->G_lm, c5, G5->G_lm, neqfix); } void MomentsG::reality(int ngz) { dim3 dB; dim3 dG; int ngx = (grids_->Nx-1)/3 + 1; dB.x = 32; dG.x = (ngx-1)/dB.x + 1; int ngy = grids_->Nz; dB.y = 8; dG.y = (ngy-1)/dB.y + 1; dB.z = 4; dG.z = (ngz-1)/dB.z + 1; hipLaunchKernelGGL(( reality_kernel) , dim3(dG), dim3(dB) , 0, 0, G_lm, ngz); } void MomentsG::restart_write(double* time) { float* G_out; hipComplex* G_h; int retval; int ncres; int moments_out[7]; size_t start[7]; size_t count[7]; int Nx = grids_->Nx; int Nakx = grids_->Nakx; int Naky = grids_->Naky; int Nyc = grids_->Nyc; int Nz = grids_->Nz; int nspec = pars_->nspec; int Nm = grids_->Nm; int Nl = grids_->Nl; // handles int id_ri, id_nz, id_Nkx, id_Nky; int id_nh, id_nl, id_ns; int id_G, id_time; char strb[512]; strcpy(strb, pars_->restart_to_file.c_str()); // if(pars_->restart) { // ultimately, appending to an existing file // if appending, are the time values consistent? 
// inquire/define the variable names // } else { int ri=2; if (retval = nc_create(strb, NC_CLOBBER, &ncres)) ERR(retval); if (retval = nc_def_dim(ncres, "ri", ri, &id_ri)) ERR(retval); if (retval = nc_def_dim(ncres, "Nz", Nz, &id_nz)) ERR(retval); if (retval = nc_def_dim(ncres, "Nkx", Nakx, &id_Nkx)) ERR(retval); if (retval = nc_def_dim(ncres, "Nky", Naky, &id_Nky)) ERR(retval); if (retval = nc_def_dim(ncres, "Nl", Nl, &id_nl)) ERR(retval); if (retval = nc_def_dim(ncres, "Nm", Nm, &id_nh)) ERR(retval); if (retval = nc_def_dim(ncres, "Ns", nspec, &id_ns)) ERR(retval); moments_out[0] = id_ns; count[0] = nspec; moments_out[1] = id_nh; count[1] = Nm; moments_out[2] = id_nl; count[2] = Nl; moments_out[3] = id_nz; count[3] = Nz; moments_out[4] = id_Nkx; count[4] = Nakx; moments_out[5] = id_Nky; count[5] = Naky; moments_out[6] = id_ri; count[6] = ri; start[0] = 0; start[1] = 0; start[2] = 0; start[3] = 0; start[4] = 0; start[5] = 0; start[6] = 0; if (retval = nc_def_var(ncres, "G", NC_FLOAT, 7, moments_out, &id_G)) ERR(retval); if (retval = nc_def_var(ncres, "time", NC_DOUBLE, 0, 0, &id_time)) ERR(retval); if (retval = nc_enddef(ncres)) ERR(retval); if (retval = nc_put_var(ncres, id_time, time)) ERR(retval); unsigned int itot, jtot; jtot = Nx * Nyc * Nz * Nm * Nl * nspec; itot = Nakx * Naky * Nz * Nm * Nl * nspec; hipHostMalloc((void**) &G_h, sizeof(hipComplex) * jtot); hipHostMalloc((void**) &G_out, sizeof(float) * itot * 2); for (unsigned int index=0; index < jtot; index++) {G_h[index].x = 0.; G_h[index].y = 0.;} for (unsigned int index=0; index < 2*itot; index++) G_out[index] = 0.; CP_TO_CPU(G_h, G_lm, sizeof(hipComplex)*jtot); for (int is=0; is < nspec; is++) { for (int m=0; m < Nm; m++) { for (int l=0; l < Nl; l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + (Nx-1)/3; i++) { for (int j=0; j < Naky; j++) { unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_out = j + Naky*(i + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); 
G_out[2*index_out] = G_h[index].x; G_out[2*index_out+1] = G_h[index].y; } } for (int i=2*Nx/3+1; i < Nx; i++) { for (int j=0; j < Naky; j++) { int it = i-2*Nx/3+(Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_out = j + Naky*(it + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_out[2*index_out] = G_h[index].x; G_out[2*index_out+1] = G_h[index].y; } } } } } } if (retval = nc_put_vara(ncres, id_G, start, count, G_out)) ERR(retval); hipHostFree(G_out); hipHostFree(G_h); if (retval = nc_close(ncres)) ERR(retval); } void MomentsG::restart_read(double* time) { float scale; float* G_in; hipComplex* G_h; hipComplex* G_hold; int retval; int ncres; size_t lhsize = grids_->size_G; size_t ldum; int Nx = grids_->Nx; int Nakx = grids_->Nakx; int Naky = grids_->Naky; int Ny = grids_->Ny; int Nyc = grids_->Nyc; int Nz = grids_->Nz; int nspec = pars_->nspec; int Nm = grids_->Nm; int Nl = grids_->Nl; // handles int id_nz, id_Nkx, id_Nky; int id_nh, id_nl, id_ns; int id_G, id_time; char stra[NC_MAX_NAME+1]; char strb[512]; strcpy(strb, pars_->restart_from_file.c_str()); if (retval = nc_open(strb, NC_NOWRITE, &ncres)) { printf("file: %s \n",strb); ERR(retval);} if (retval = nc_inq_dimid(ncres, "Nkx", &id_Nkx)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nky", &id_Nky)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nz", &id_nz)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nl", &id_nl)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nm", &id_nh)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Ns", &id_ns)) ERR(retval); if (retval = nc_inq_varid(ncres, "G", &id_G)) ERR(retval); if (retval = nc_inq_varid(ncres, "time", &id_time)) ERR(retval); if (retval = nc_inq_dim(ncres, id_ns, stra, &ldum)) ERR(retval); if (nspec-pars_->ns_add != (int) ldum) { printf("Cannot restart because of nspec mismatch: %d \t %zu \n", nspec, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nh, stra, 
&ldum)) ERR(retval); if (Nm-pars_->nm_add != (int) ldum) { printf("Cannot restart because of Nm mismatch: %d \t %zu \n", Nm, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nl, stra, &ldum)) ERR(retval); if (Nl-pars_->nl_add != (int) ldum) { printf("Cannot restart because of Nl mismatch: %d \t %zu \n", Nl, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nz, stra, &ldum)) ERR(retval); if (Nz != (int) ldum*pars_->ntheta_mult) { printf("Cannot restart because of nz mismatch: %d \t %zu \n", Nz, ldum*pars_->ntheta_mult); exit (1); } if (retval = nc_inq_dim(ncres, id_Nkx, stra, &ldum)) ERR(retval); if (1 + 2*((Nx/pars_->nx_mult-1)/3) != (int) ldum) { printf("Cannot restart because of Nkx mismatch: %d \t %zu \n", Nakx, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_Nky, stra, &ldum)) ERR(retval); if (1 + (Ny/pars_->ny_mult-1)/3 != (int) ldum) { printf("Cannot restart because of Nky mismatch: %d \t %zu \n", Naky, ldum); exit (1); } unsigned int itot; // itot = Nakx * Naky * Nz * Nm * Nl * nspec; itot = Nx * Nyc * Nz * Nm * Nl * nspec; unsigned int iitot = Nakx * Naky * Nz * Nm * Nl * nspec; if (pars_->domain_change) { int old_Nakx = 1 + 2 * ((Nx/pars_->nx_mult - 1)/3); int old_Naky = 1 + ((Ny/pars_->ny_mult - 1)/3); int old_Nz = Nz/pars_->ntheta_mult; int old_Nl = Nl - pars_->nl_add; int old_Nm = Nm - pars_->nm_add; int old_ns = nspec - pars_->ns_add; iitot = old_Nakx * old_Naky * old_Nz * old_Nm * old_Nl * old_ns; } hipHostMalloc((void**) &G_hold, lhsize); hipHostMalloc((void**) &G_h, lhsize); hipHostMalloc((void**) &G_in, sizeof(float) * iitot * 2); for (unsigned int index=0; index < itot; index++) {G_hold[index].x = 0.; G_hold[index].y = 0.;} for (unsigned int index=0; index < itot; index++) {G_h[index].x = 0.; G_h[index].y = 0.;} for (unsigned int index=0; index<2*iitot; index++) {G_in[index] = 0.;} CP_TO_CPU(G_hold, G_lm, sizeof(hipComplex)*itot); if (retval = nc_get_var(ncres, id_G, G_in)) ERR(retval); if (retval = nc_get_var(ncres, id_time, time)) 
ERR(retval); if (retval = nc_close(ncres)) ERR(retval); scale = pars_->scale; if (!pars_->domain_change) { for (int is=0; is < nspec; is++) { for (int m=0; m < Nm; m++) { for (int l=0; l < Nl; l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + (Nx-1)/3; i++) { for (int j=0; j < Naky; j++) { unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + Naky*(i + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } for (int i=2*Nx/3+1; i < Nx; i++) { for (int j=0; j < Naky; j++) { int it = i-2*Nx/3+(Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + Naky*(it + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } } } } else { int old_Naky = 1 + (Ny/pars_->ny_mult - 1)/3; int jj; int old_Nakx = 1 + 2*((Nx/pars_->nx_mult - 1)/3); int ii; int old_Nx = Nx/pars_->nx_mult; int old_Nz = Nz/pars_->ntheta_mult; // not yet implemented int old_Nm = Nm - pars_->nm_add; int old_Nl = Nl - pars_->nl_add; int old_ns = nspec - pars_->ns_add; for (int is=0; is < min(old_ns, nspec); is++) { for (int m=0; m < min(old_Nm, Nm); m++) { for (int l=0; l < min(old_Nl, Nl); l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + old_Nakx/2; i++) { ii = i * pars_->x0_mult; if (ii < 1 + Nakx/2) { for (int j=0; j < old_Naky; j++) { jj = j * pars_->y0_mult; if (jj < Naky) { unsigned int index = jj + Nyc *(ii + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + old_Naky*(i + old_Nakx*(k + Nz*(l + old_Nl*(m + old_Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } for (int i=2*old_Nx/3+1; i < old_Nx; i++) { ii =(i-old_Nx) * 
pars_->x0_mult + Nx; if ((i-old_Nx) * pars_->x0_mult + 1 + Nakx/2 > 0) { for (int j=0; j < old_Naky; j++) { jj = j * pars_->y0_mult; if (jj < Naky) { int it = i-2*old_Nx/3+(old_Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = jj + Nyc *(ii + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + old_Naky*(it + old_Nakx*(k + Nz*(l + old_Nl*(m + old_Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } } } } } } hipHostFree(G_in); hipHostFree(G_hold); unsigned int jtot = Nx * Nyc * Nz * Nm * Nl * nspec; CP_TO_GPU(G_lm, G_h, sizeof(hipComplex)*jtot); hipHostFree(G_h); } void MomentsG::qvar(int N) { hipComplex* G_h; // int Nk = grids_->Nyc; // Nk = 1; int Nk = grids_->NxNycNz; G_h = (hipComplex*) malloc (sizeof(hipComplex)*N); for (int i=0; i<N; i++) {G_h[i].x = 0.; G_h[i].y = 0.;} CP_TO_CPU (G_h, G_lm, N*sizeof(hipComplex)); printf("\n"); // for (int i=0; i<N; i++) printf("var(%d,%d) = (%e, %e) \n", i%Nk, i/Nk, G_h[i].x, G_h[i].y); // for (int i=N-20; i<N; i++) printf("var(%d) = (%e, %e) \n", i, G_h[i].x, G_h[i].y); for (int i=0; i<N; i++) printf("m var(%d,%d) = (%e, %e) \n", i%Nk, i/Nk, G_h[i].x, G_h[i].y); printf("\n"); free (G_h); } void MomentsG::update_tprim(double time) { // this is a proof-of-principle hack. 
typically nothing will happen here // for one species (or the first species in the species list): // adjust tprim according to the function // if t < t0: // tprim = tprim_0 // if t > t0: // if (t < tf) tprim = tprim_0 + (tprim_0 - tprim_f)/(t0-tf)*(t-t0) // else tprim = tprim_f if (pars_->tp_t0 > -0.5) { if (time < (double) pars_->tp_t0) { float tp = pars_->tprim0; CP_TO_GPU (tps, &tp, sizeof(float)); } else { if (time < (double) pars_->tp_tf) { float tfac = (float) time; float tprim0 = pars_->tprim0; float tprimf = pars_->tprimf; float t0 = pars_->tp_t0; float tf = pars_->tp_tf; float tp = tprim0 + (tprim0-tprimf)/(t0-tf)*(tfac-t0); CP_TO_GPU (tps, &tp, sizeof(float)); } else { float tp = pars_->tprimf; CP_TO_GPU (tps, &tp, sizeof(float)); } } } }
7418e9763ece42ae348f7718d42658914b973f20.cu
#include <random> #include <algorithm> #include <vector> #include "moments.h" #define GALL <<< dG_all, dB_all >>> MomentsG::MomentsG(Parameters* pars, Grids* grids) : grids_(grids), pars_(pars) { G_lm = nullptr; dens_ptr = nullptr; upar_ptr = nullptr; tpar_ptr = nullptr; tprp_ptr = nullptr; qpar_ptr = nullptr; qprp_ptr = nullptr; size_t lhsize = grids_->size_G; // printf("nspecies = %d and size_G = %d \n",grids_->Nspecies, (int) grids_->size_G); checkCuda(cudaMalloc((void**) &G_lm, lhsize)); cudaMemset(G_lm, 0., lhsize); float * vts_h; cudaMallocHost( &vts_h, sizeof(float) * grids_->Nspecies ); float * tzs_h; cudaMallocHost( &tzs_h, sizeof(float) * grids_->Nspecies ); float * zts_h; cudaMallocHost( &zts_h, sizeof(float) * grids_->Nspecies ); float * nts_h; cudaMallocHost( &nts_h, sizeof(float) * grids_->Nspecies ); float * nzs_h; cudaMallocHost( &nzs_h, sizeof(float) * grids_->Nspecies ); float * r2s_h; cudaMallocHost( &r2s_h, sizeof(float) * grids_->Nspecies ); float * tps_h; cudaMallocHost( &tps_h, sizeof(float) * grids_->Nspecies ); float * fps_h; cudaMallocHost( &fps_h, sizeof(float) * grids_->Nspecies ); float * ups_h; cudaMallocHost( &ups_h, sizeof(float) * grids_->Nspecies ); float * aps_h; cudaMallocHost( &aps_h, sizeof(float) * grids_->Nspecies ); float * qns_h; cudaMallocHost( &qns_h, sizeof(float) * grids_->Nspecies ); float * nu_ss_h; cudaMallocHost( &nu_ss_h, sizeof(float) * grids_->Nspecies ); int * typ_h; cudaMallocHost( &typ_h, sizeof(int) * grids_->Nspecies ); for (int is=0; is<grids_->Nspecies; is++) { vts_h[is] = pars_->species_h[is].vt; tzs_h[is] = pars_->species_h[is].tz; zts_h[is] = pars_->species_h[is].zt; nts_h[is] = pars_->species_h[is].nt; nzs_h[is] = pars_->species_h[is].nz; r2s_h[is] = pars_->species_h[is].rho2; tps_h[is] = pars_->species_h[is].tprim; fps_h[is] = pars_->species_h[is].fprim; ups_h[is] = pars_->species_h[is].uprim; aps_h[is] = pars_->species_h[is].as; qns_h[is] = pars_->species_h[is].qneut; nu_ss_h[is] = 
pars_->species_h[is].nu_ss; typ_h[is] = pars_->species_h[is].type; } checkCuda(cudaMalloc( &vts, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &zts, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &tzs, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &nts, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &nzs, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &aps, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &r2s, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &qns, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &tps, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &fps, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &ups, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &nu_ss, sizeof(float) * grids_->Nspecies ) ); checkCuda(cudaMalloc( &typ, sizeof(int) * grids_->Nspecies ) ); CP_TO_GPU(vts, vts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(tzs, tzs_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(zts, zts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nts, nts_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nzs, nzs_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(r2s, r2s_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(tps, tps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(fps, fps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(ups, ups_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(aps, aps_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(qns, qns_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(nu_ss, nu_ss_h, sizeof(float)*grids_->Nspecies); CP_TO_GPU(typ, typ_h, sizeof(int) *grids_->Nspecies); cudaFreeHost(vts_h); cudaFreeHost(tzs_h); cudaFreeHost(zts_h); cudaFreeHost(nts_h); cudaFreeHost(nzs_h); cudaFreeHost(r2s_h); cudaFreeHost(tps_h); cudaFreeHost(fps_h); cudaFreeHost(ups_h); cudaFreeHost(aps_h); cudaFreeHost(qns_h); cudaFreeHost(nu_ss_h); cudaFreeHost(typ_h); dens_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); 
upar_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); tpar_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); tprp_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); qpar_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); qprp_ptr = (cuComplex**) malloc(sizeof(cuComplex*) * grids_->Nspecies); printf("Allocated a G_lm array of size %.2f MB\n", lhsize/1024./1024.); int Nm = grids_->Nm; int Nl = grids_->Nl; for(int s=0; s<grids->Nspecies; s++) { // set up pointers for named moments that point to parts of G_lm int l,m; l = 0, m = 0; // density if(l<Nl && m<Nm) dens_ptr[s] = G(l,m,s); l = 0, m = 1; // u_parallel if(l<Nl && m<Nm) upar_ptr[s] = G(l,m,s); l = 0, m = 2; // T_parallel / sqrt(2) if(l<Nl && m<Nm) tpar_ptr[s] = G(l,m,s); l = 0, m = 3; // q_parallel / sqrt(6) if(l<Nl && m<Nm) qpar_ptr[s] = G(l,m,s); l = 1, m = 0; // T_perp if(l<Nl && m<Nm) tprp_ptr[s] = G(l,m,s); l = 1, m = 1; // q_perp if(l<Nl && m<Nm) qprp_ptr[s] = G(l,m,s); } int nn1, nn2, nn3, nt1, nt2, nt3, nb1, nb2, nb3; if (pars_->ks) { printf("initializing Kuramoto-Sivashinsky\n"); nn1 = grids_->Nyc; nt1 = min(nn1, 128); nb1 = 1 + (nn1-1)/nt1; nn2 = 1; nt2 = min(nn2, 1); nb2 = 1 + (nn2-1)/nt2; nn3 = 1; nt3 = min(nn3, 1); nb3 = 1 + (nn3-1)/nt3; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); dimBlock = dim3(nt1, nt2, nt3); dimGrid = dim3(nb1, nb2, nb3); return; } if (pars_->vp) { printf("initializing Vlasov-Poisson\n"); nn1 = grids_->Nyc; nt1 = min(nn1, 128); nb1 = 1 + (nn1-1)/nt1; nn2 = 1; nt2 = min(nn2, 1); nb2 = 1 + (nn2-1)/nt2; nn3 = 1; nt3 = min(nn3, 1); nb3 = 1 + (nn3-1)/nt3; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); return; } // nn1 = grids_->NxNycNz; nt1 = min(32, nn1); nb1 = 1 + (nn1-1)/nt1; // nn2 = 1; nt2 = min( 4, Nl); nb2 = 1 + (nn2-1)/nt2; // nn3 = 1; nt3 = min( 4, Nm); nb3 = 1 + (nn3-1)/nt3; // dimBlock = dim3(nt1, nt2, nt3); // dimGrid = dim3(nb1, nb2, nb3); // dimBlock = dim3(32, min(4, Nl), 
min(4, Nm)); // dimGrid = dim3((grids_->NxNycNz-1)/dimBlock.x+1, 1, 1); nn1 = grids_->Nyc*grids_->Nx; nt1 = min(nn1, 32); nb1 = (nn1-1)/nt1 + 1; nn2 = grids_->Nz; nt2 = min(nn2, 32); nb2 = (nn2-1)/nt2 + 1; nn3 = grids_->Nspecies*grids_->Nm*grids_->Nl; nt3 = min(nn3, 1); nb3 = (nn3-1)/nt3 + 1; dB_all = dim3(nt1, nt2, nt3); dG_all = dim3(nb1, nb2, nb3); } MomentsG::~MomentsG() { free (dens_ptr); free (upar_ptr); free (tpar_ptr); free (qpar_ptr); free (qprp_ptr); if ( G_lm ) cudaFree ( G_lm ); } void MomentsG::set_zero(void) { cudaMemset(G_lm, 0., grids_->size_G); } void MomentsG::initVP(double *time) { cuComplex *init_h = nullptr; cudaMallocHost((void**) &init_h, sizeof(cuComplex)*grids_->Nyc*grids_->Nm); for (int ig = 0; ig<grids_->Nyc*grids_->Nm; ig++) { init_h[ig].x = 0.; init_h[ig].y = 0.; } // start with something simple: if (!pars_->restart) init_h[0].x = 1.; // This is the Maxwellian background init_h[1 + grids_->Nyc * 2].x = pars_->init_amp; // This is a temperature perturbation (up to a factor of sqrt(2)). 
CP_TO_GPU(G_lm, init_h, sizeof(cuComplex)*grids_->Nyc*grids_->Nm); cudaFreeHost(init_h); if (pars_->restart) this->restart_read(time); cudaDeviceSynchronize(); } void MomentsG::initialConditions(double *time) { size_t momsize = sizeof(cuComplex)*grids_->NxNycNz; cuComplex *init_h = nullptr; cudaMallocHost((void**) &init_h, momsize); std::random_device rd; std::mt19937 gen(rd()); std::normal_distribution<float> ramp(0., pars_->init_amp); for (int idy = 0; idy<grids_->Nyc; idy++) { init_h[idy].x = 0.; init_h[idy].y = 0.; } for (int idy = 1; idy<grids_->Naky; idy++) { init_h[idy].x = ramp(gen); init_h[idy].y = ramp(gen); } // init_h[1].x = 0.5; // init_h[2].y = -0.25; CP_TO_GPU(G_lm, init_h, momsize); cudaFreeHost(init_h); // restart_read goes here, if restart == T // as in gs2, if restart_read is true, we want to *add* the restart values to anything // that has happened above and also move the value of time up to the end of the previous run if(pars_->restart) { DEBUG_PRINT("reading restart file \n"); this->restart_read(time); } cudaDeviceSynchronize(); // checkCuda(cudaGetLastError()); // return cudaGetLastError(); } void MomentsG::initialConditions(float* z_h, double* time) { checkCuda(cudaGetLastError()); cudaDeviceSynchronize(); // to make sure its safe to operate on host memory size_t momsize = sizeof(cuComplex)*grids_->NxNycNz; cuComplex *init_h = nullptr; cudaMallocHost(&init_h, momsize); checkCuda(cudaGetLastError()); for (int idx=0; idx<grids_->NxNycNz; idx++) { init_h[idx].x = 0.; init_h[idx].y = 0.; } if (pars_->ks) { init_h[1].x = 0.5; init_h[2].y = -0.25; } else { if(pars_->init_single) { //initialize single mode int iky = pars_->iky_single; int ikx = pars_->ikx_single; int NKX = 1; if (iky == 0 && ikx<1+(grids_->Nx-1)/3) NKX = 2; // reality condition for tertiary tests for (int j = 0; j<NKX; j++) { if (j==1) ikx = grids_->Nx-ikx; DEBUG_PRINT("ikx, iky: %d \t %d \n",ikx, iky); // float fac; // if(pars_->nlpm_test && iky==0) fac = .5; // else fac = 1.; // 
DEBUG_PRINT("fac = %f \n",fac); for(int iz=0; iz<grids_->Nz; iz++) { int index = iky + grids_->Nyc*ikx + grids_->NxNyc*iz; init_h[index].x = pars_->init_amp; //*fac; init_h[index].y = 0.; //init_amp; } } } else { srand(22); float samp; int idx; // printf("Hacking the initial condition! \n"); for(int i=0; i < 1 + (grids_->Nx - 1)/3; i++) { for(int j=1; j < 1 + (grids_->Ny - 1)/3; j++) { samp = pars_->init_amp; float ra = (float) (samp * (rand()-RAND_MAX/2) / RAND_MAX); float rb = (float) (samp * (rand()-RAND_MAX/2) / RAND_MAX); for (int js=0; js < 2; js++) { if (i==0) { idx = i; } else { idx = (js==0) ? i : grids_->Nx-i; } for(int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); if (js == 0) { init_h[index].x = ra; init_h[index].y = rb; } else { init_h[index].x = rb; init_h[index].y = ra; } if (pars_->kpar_init < 0.) { init_h[index].x *= (cos( -pars_->kpar_init *z_h[k]/pars_->Zp) + cos((-pars_->kpar_init+1.)*z_h[k]/pars_->Zp)); init_h[index].y *= (cos( -pars_->kpar_init *z_h[k]/pars_->Zp) + cos((-pars_->kpar_init+1.)*z_h[k]/pars_->Zp)); } else { init_h[index].x *= cos(pars_->kpar_init*z_h[k]/pars_->Zp); init_h[index].y *= cos(pars_->kpar_init*z_h[k]/pars_->Zp); } // printf("init_h[%d] = (%e, %e) \n",index,init_h[index].x,init_h[index].y); } } if (pars_->random_init) { for (int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); init_h[index].x = 0.; init_h[index].y = 0.; } for (int jj=1; jj<1+(grids_->Nz-1)/3; jj++) { float ka = (float) (samp * rand() / RAND_MAX); float pa = (float) (M_PI * (rand()-RAND_MAX/2) / RAND_MAX); float kb = (float) (samp * rand() / RAND_MAX); float pb = (float) (M_PI * (rand()-RAND_MAX/2) / RAND_MAX); for (int k=0; k<grids_->Nz; k++) { int index = j + grids_->Nyc*(idx + grids_->Nx*k); init_h[index].x += ka*sin((float) jj*z_h[k] + pa); init_h[index].y += kb*sin((float) jj*z_h[k] + pb); } } } } } } } // copy initial condition into device memory for (int is=0; is<grids_->Nspecies; is++) { 
switch (pars_->initf) { case inits::density : CP_TO_GPU(dens_ptr[is], init_h, momsize); break; case inits::upar : CP_TO_GPU(upar_ptr[is], init_h, momsize); break; case inits::tpar : CP_TO_GPU(tpar_ptr[is], init_h, momsize); break; case inits::tperp : CP_TO_GPU(tprp_ptr[is], init_h, momsize); break; case inits::qpar : CP_TO_GPU(qpar_ptr[is], init_h, momsize); break; case inits::qperp : CP_TO_GPU(qprp_ptr[is], init_h, momsize); break; } checkCuda(cudaGetLastError()); } cudaFreeHost(init_h); // restart_read goes here, if restart == T // as in gs2, if restart_read is true, we want to *add* the restart values to anything // that has happened above and also move the value of time up to the end of the previous run if(pars_->restart) { DEBUG_PRINT("reading restart file \n"); this->restart_read(time); } cudaDeviceSynchronize(); checkCuda(cudaGetLastError()); DEBUG_PRINT("initial conditions set \n"); } void MomentsG::scale(double scalar) {scale_kernel GALL (G_lm, scalar);} void MomentsG::scale(cuComplex scalar) {scale_kernel GALL (G_lm, scalar);} void MomentsG::mask(void) {maskG GALL (this->G_lm);} void MomentsG::getH(cuComplex* J0phi) {Hkernel GALL (G_lm, J0phi);} void MomentsG::getG(cuComplex* J0phi) {Gkernel GALL (G_lm, J0phi);} void MomentsG::rescale(float * phi_max) { rescale_kernel GALL (G_lm, phi_max, grids_->Nspecies*grids_->Nm*grids_->Nl); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3, double c4, MomentsG* G4) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, c4, G4->G_lm, 
neqfix); } void MomentsG::add_scaled(double c1, MomentsG* G1, double c2, MomentsG* G2, double c3, MomentsG* G3, double c4, MomentsG* G4, double c5, MomentsG* G5) { bool neqfix = !pars_->eqfix; add_scaled_kernel GALL (G_lm, c1, G1->G_lm, c2, G2->G_lm, c3, G3->G_lm, c4, G4->G_lm, c5, G5->G_lm, neqfix); } void MomentsG::reality(int ngz) { dim3 dB; dim3 dG; int ngx = (grids_->Nx-1)/3 + 1; dB.x = 32; dG.x = (ngx-1)/dB.x + 1; int ngy = grids_->Nz; dB.y = 8; dG.y = (ngy-1)/dB.y + 1; dB.z = 4; dG.z = (ngz-1)/dB.z + 1; reality_kernel <<< dG, dB >>> (G_lm, ngz); } void MomentsG::restart_write(double* time) { float* G_out; cuComplex* G_h; int retval; int ncres; int moments_out[7]; size_t start[7]; size_t count[7]; int Nx = grids_->Nx; int Nakx = grids_->Nakx; int Naky = grids_->Naky; int Nyc = grids_->Nyc; int Nz = grids_->Nz; int nspec = pars_->nspec; int Nm = grids_->Nm; int Nl = grids_->Nl; // handles int id_ri, id_nz, id_Nkx, id_Nky; int id_nh, id_nl, id_ns; int id_G, id_time; char strb[512]; strcpy(strb, pars_->restart_to_file.c_str()); // if(pars_->restart) { // ultimately, appending to an existing file // if appending, are the time values consistent? 
// inquire/define the variable names // } else { int ri=2; if (retval = nc_create(strb, NC_CLOBBER, &ncres)) ERR(retval); if (retval = nc_def_dim(ncres, "ri", ri, &id_ri)) ERR(retval); if (retval = nc_def_dim(ncres, "Nz", Nz, &id_nz)) ERR(retval); if (retval = nc_def_dim(ncres, "Nkx", Nakx, &id_Nkx)) ERR(retval); if (retval = nc_def_dim(ncres, "Nky", Naky, &id_Nky)) ERR(retval); if (retval = nc_def_dim(ncres, "Nl", Nl, &id_nl)) ERR(retval); if (retval = nc_def_dim(ncres, "Nm", Nm, &id_nh)) ERR(retval); if (retval = nc_def_dim(ncres, "Ns", nspec, &id_ns)) ERR(retval); moments_out[0] = id_ns; count[0] = nspec; moments_out[1] = id_nh; count[1] = Nm; moments_out[2] = id_nl; count[2] = Nl; moments_out[3] = id_nz; count[3] = Nz; moments_out[4] = id_Nkx; count[4] = Nakx; moments_out[5] = id_Nky; count[5] = Naky; moments_out[6] = id_ri; count[6] = ri; start[0] = 0; start[1] = 0; start[2] = 0; start[3] = 0; start[4] = 0; start[5] = 0; start[6] = 0; if (retval = nc_def_var(ncres, "G", NC_FLOAT, 7, moments_out, &id_G)) ERR(retval); if (retval = nc_def_var(ncres, "time", NC_DOUBLE, 0, 0, &id_time)) ERR(retval); if (retval = nc_enddef(ncres)) ERR(retval); if (retval = nc_put_var(ncres, id_time, time)) ERR(retval); unsigned int itot, jtot; jtot = Nx * Nyc * Nz * Nm * Nl * nspec; itot = Nakx * Naky * Nz * Nm * Nl * nspec; cudaMallocHost((void**) &G_h, sizeof(cuComplex) * jtot); cudaMallocHost((void**) &G_out, sizeof(float) * itot * 2); for (unsigned int index=0; index < jtot; index++) {G_h[index].x = 0.; G_h[index].y = 0.;} for (unsigned int index=0; index < 2*itot; index++) G_out[index] = 0.; CP_TO_CPU(G_h, G_lm, sizeof(cuComplex)*jtot); for (int is=0; is < nspec; is++) { for (int m=0; m < Nm; m++) { for (int l=0; l < Nl; l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + (Nx-1)/3; i++) { for (int j=0; j < Naky; j++) { unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_out = j + Naky*(i + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); 
G_out[2*index_out] = G_h[index].x; G_out[2*index_out+1] = G_h[index].y; } } for (int i=2*Nx/3+1; i < Nx; i++) { for (int j=0; j < Naky; j++) { int it = i-2*Nx/3+(Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_out = j + Naky*(it + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_out[2*index_out] = G_h[index].x; G_out[2*index_out+1] = G_h[index].y; } } } } } } if (retval = nc_put_vara(ncres, id_G, start, count, G_out)) ERR(retval); cudaFreeHost(G_out); cudaFreeHost(G_h); if (retval = nc_close(ncres)) ERR(retval); } void MomentsG::restart_read(double* time) { float scale; float* G_in; cuComplex* G_h; cuComplex* G_hold; int retval; int ncres; size_t lhsize = grids_->size_G; size_t ldum; int Nx = grids_->Nx; int Nakx = grids_->Nakx; int Naky = grids_->Naky; int Ny = grids_->Ny; int Nyc = grids_->Nyc; int Nz = grids_->Nz; int nspec = pars_->nspec; int Nm = grids_->Nm; int Nl = grids_->Nl; // handles int id_nz, id_Nkx, id_Nky; int id_nh, id_nl, id_ns; int id_G, id_time; char stra[NC_MAX_NAME+1]; char strb[512]; strcpy(strb, pars_->restart_from_file.c_str()); if (retval = nc_open(strb, NC_NOWRITE, &ncres)) { printf("file: %s \n",strb); ERR(retval);} if (retval = nc_inq_dimid(ncres, "Nkx", &id_Nkx)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nky", &id_Nky)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nz", &id_nz)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nl", &id_nl)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Nm", &id_nh)) ERR(retval); if (retval = nc_inq_dimid(ncres, "Ns", &id_ns)) ERR(retval); if (retval = nc_inq_varid(ncres, "G", &id_G)) ERR(retval); if (retval = nc_inq_varid(ncres, "time", &id_time)) ERR(retval); if (retval = nc_inq_dim(ncres, id_ns, stra, &ldum)) ERR(retval); if (nspec-pars_->ns_add != (int) ldum) { printf("Cannot restart because of nspec mismatch: %d \t %zu \n", nspec, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nh, stra, 
&ldum)) ERR(retval); if (Nm-pars_->nm_add != (int) ldum) { printf("Cannot restart because of Nm mismatch: %d \t %zu \n", Nm, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nl, stra, &ldum)) ERR(retval); if (Nl-pars_->nl_add != (int) ldum) { printf("Cannot restart because of Nl mismatch: %d \t %zu \n", Nl, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_nz, stra, &ldum)) ERR(retval); if (Nz != (int) ldum*pars_->ntheta_mult) { printf("Cannot restart because of nz mismatch: %d \t %zu \n", Nz, ldum*pars_->ntheta_mult); exit (1); } if (retval = nc_inq_dim(ncres, id_Nkx, stra, &ldum)) ERR(retval); if (1 + 2*((Nx/pars_->nx_mult-1)/3) != (int) ldum) { printf("Cannot restart because of Nkx mismatch: %d \t %zu \n", Nakx, ldum); exit (1); } if (retval = nc_inq_dim(ncres, id_Nky, stra, &ldum)) ERR(retval); if (1 + (Ny/pars_->ny_mult-1)/3 != (int) ldum) { printf("Cannot restart because of Nky mismatch: %d \t %zu \n", Naky, ldum); exit (1); } unsigned int itot; // itot = Nakx * Naky * Nz * Nm * Nl * nspec; itot = Nx * Nyc * Nz * Nm * Nl * nspec; unsigned int iitot = Nakx * Naky * Nz * Nm * Nl * nspec; if (pars_->domain_change) { int old_Nakx = 1 + 2 * ((Nx/pars_->nx_mult - 1)/3); int old_Naky = 1 + ((Ny/pars_->ny_mult - 1)/3); int old_Nz = Nz/pars_->ntheta_mult; int old_Nl = Nl - pars_->nl_add; int old_Nm = Nm - pars_->nm_add; int old_ns = nspec - pars_->ns_add; iitot = old_Nakx * old_Naky * old_Nz * old_Nm * old_Nl * old_ns; } cudaMallocHost((void**) &G_hold, lhsize); cudaMallocHost((void**) &G_h, lhsize); cudaMallocHost((void**) &G_in, sizeof(float) * iitot * 2); for (unsigned int index=0; index < itot; index++) {G_hold[index].x = 0.; G_hold[index].y = 0.;} for (unsigned int index=0; index < itot; index++) {G_h[index].x = 0.; G_h[index].y = 0.;} for (unsigned int index=0; index<2*iitot; index++) {G_in[index] = 0.;} CP_TO_CPU(G_hold, G_lm, sizeof(cuComplex)*itot); if (retval = nc_get_var(ncres, id_G, G_in)) ERR(retval); if (retval = nc_get_var(ncres, id_time, time)) 
ERR(retval); if (retval = nc_close(ncres)) ERR(retval); scale = pars_->scale; if (!pars_->domain_change) { for (int is=0; is < nspec; is++) { for (int m=0; m < Nm; m++) { for (int l=0; l < Nl; l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + (Nx-1)/3; i++) { for (int j=0; j < Naky; j++) { unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + Naky*(i + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } for (int i=2*Nx/3+1; i < Nx; i++) { for (int j=0; j < Naky; j++) { int it = i-2*Nx/3+(Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = j + Nyc *(i + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + Naky*(it + Nakx*(k + Nz*(l + Nl*(m + Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } } } } else { int old_Naky = 1 + (Ny/pars_->ny_mult - 1)/3; int jj; int old_Nakx = 1 + 2*((Nx/pars_->nx_mult - 1)/3); int ii; int old_Nx = Nx/pars_->nx_mult; int old_Nz = Nz/pars_->ntheta_mult; // not yet implemented int old_Nm = Nm - pars_->nm_add; int old_Nl = Nl - pars_->nl_add; int old_ns = nspec - pars_->ns_add; for (int is=0; is < min(old_ns, nspec); is++) { for (int m=0; m < min(old_Nm, Nm); m++) { for (int l=0; l < min(old_Nl, Nl); l++) { for (int k=0; k < Nz; k++) { for (int i=0; i < 1 + old_Nakx/2; i++) { ii = i * pars_->x0_mult; if (ii < 1 + Nakx/2) { for (int j=0; j < old_Naky; j++) { jj = j * pars_->y0_mult; if (jj < Naky) { unsigned int index = jj + Nyc *(ii + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + old_Naky*(i + old_Nakx*(k + Nz*(l + old_Nl*(m + old_Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } for (int i=2*old_Nx/3+1; i < old_Nx; i++) { ii =(i-old_Nx) * 
pars_->x0_mult + Nx; if ((i-old_Nx) * pars_->x0_mult + 1 + Nakx/2 > 0) { for (int j=0; j < old_Naky; j++) { jj = j * pars_->y0_mult; if (jj < Naky) { int it = i-2*old_Nx/3+(old_Nx-1)/3; // not very clear, depends on arcane integer math rules unsigned int index = jj + Nyc *(ii + Nx *(k + Nz*(l + Nl*(m + Nm*is)))); unsigned int index_in = j + old_Naky*(it + old_Nakx*(k + Nz*(l + old_Nl*(m + old_Nm*is)))); G_h[index].x = scale * G_in[2*index_in] + G_hold[index].x; G_h[index].y = scale * G_in[2*index_in+1] + G_hold[index].y; } } } } } } } } } cudaFreeHost(G_in); cudaFreeHost(G_hold); unsigned int jtot = Nx * Nyc * Nz * Nm * Nl * nspec; CP_TO_GPU(G_lm, G_h, sizeof(cuComplex)*jtot); cudaFreeHost(G_h); } void MomentsG::qvar(int N) { cuComplex* G_h; // int Nk = grids_->Nyc; // Nk = 1; int Nk = grids_->NxNycNz; G_h = (cuComplex*) malloc (sizeof(cuComplex)*N); for (int i=0; i<N; i++) {G_h[i].x = 0.; G_h[i].y = 0.;} CP_TO_CPU (G_h, G_lm, N*sizeof(cuComplex)); printf("\n"); // for (int i=0; i<N; i++) printf("var(%d,%d) = (%e, %e) \n", i%Nk, i/Nk, G_h[i].x, G_h[i].y); // for (int i=N-20; i<N; i++) printf("var(%d) = (%e, %e) \n", i, G_h[i].x, G_h[i].y); for (int i=0; i<N; i++) printf("m var(%d,%d) = (%e, %e) \n", i%Nk, i/Nk, G_h[i].x, G_h[i].y); printf("\n"); free (G_h); } void MomentsG::update_tprim(double time) { // this is a proof-of-principle hack. 
typically nothing will happen here // for one species (or the first species in the species list): // adjust tprim according to the function // if t < t0: // tprim = tprim_0 // if t > t0: // if (t < tf) tprim = tprim_0 + (tprim_0 - tprim_f)/(t0-tf)*(t-t0) // else tprim = tprim_f if (pars_->tp_t0 > -0.5) { if (time < (double) pars_->tp_t0) { float tp = pars_->tprim0; CP_TO_GPU (tps, &tp, sizeof(float)); } else { if (time < (double) pars_->tp_tf) { float tfac = (float) time; float tprim0 = pars_->tprim0; float tprimf = pars_->tprimf; float t0 = pars_->tp_t0; float tf = pars_->tp_tf; float tp = tprim0 + (tprim0-tprimf)/(t0-tf)*(tfac-t0); CP_TO_GPU (tps, &tp, sizeof(float)); } else { float tp = pars_->tprimf; CP_TO_GPU (tps, &tp, sizeof(float)); } } } }
8390c08f54a9f43c1ea551d15e280a7d70d36dba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_back [3][2]; static int dims_update_halo_kernel4_plus_2_back_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_back_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(0,0,2); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(0,0,2); } __global__ void ops_update_halo_kernel4_plus_2_back( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_back[0][0] * dims_update_halo_kernel4_plus_2_back[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_back[1][0] * dims_update_halo_kernel4_plus_2_back[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_back[0][0], dims_update_halo_kernel4_plus_2_back[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_back[1][0], dims_update_halo_kernel4_plus_2_back[1][1], arg1); update_halo_kernel4_plus_2_back_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_back_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 
= desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); OPS_kernels[80].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_back_h[1][1]) { dims_update_halo_kernel4_plus_2_back_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_back_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_back_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_back_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel4_plus_2_back, dims_update_halo_kernel4_plus_2_back_h, sizeof(dims_update_halo_kernel4_plus_2_back))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d 
+ consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[80].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, 
&arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 80; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 80; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
8390c08f54a9f43c1ea551d15e280a7d70d36dba.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_back [3][2]; static int dims_update_halo_kernel4_plus_2_back_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_back_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(0,0,2); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(0,0,2); } __global__ void ops_update_halo_kernel4_plus_2_back( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_back[0][0] * dims_update_halo_kernel4_plus_2_back[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_back[1][0] * dims_update_halo_kernel4_plus_2_back[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_back[0][0], dims_update_halo_kernel4_plus_2_back[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_back[1][0], dims_update_halo_kernel4_plus_2_back[1][1], arg1); update_halo_kernel4_plus_2_back_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_back_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; 
ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); OPS_kernels[80].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_back_h[1][1]) { dims_update_halo_kernel4_plus_2_back_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_back_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_back_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_back_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel4_plus_2_back, dims_update_halo_kernel4_plus_2_back_h, sizeof(dims_update_halo_kernel4_plus_2_back))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = 
arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[80].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block 
block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 80; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 80; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
b8b088297b3c1fe8b155872152edbb9cdb8a53d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> struct Data { Data(int size) : size(size), bytes(size * sizeof(float)) { hipMalloc(&x, bytes); hipMalloc(&y, bytes); hipMemset(x, 0, bytes); hipMemset(y, 0, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { hipMalloc(&x, bytes); hipMalloc(&y, bytes); hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice); } ~Data() { hipFree(x); hipFree(y); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts) { extern __shared__ float shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; if (threadIdx.x < k) { shared_means[threadIdx.x] = means_x[threadIdx.x]; shared_means[k + threadIdx.x] = means_y[threadIdx.x]; } __syncthreads(); // Make global loads once. 
const float x = data_x[index]; const float y = data_y[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x, y, shared_means[cluster], shared_means[k + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&counts[best_cluster], 1); } __global__ void compute_new_means_and_reset(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ new_sum_x, float* __restrict__ new_sum_y, int* __restrict__ counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; new_sum_y[cluster] = 0; new_sum_x[cluster] = 0; counts[cluster] = 0; } int main(int argc, const char* argv[]) { if (argc < 3) { std::cerr << "usage: assign_clusters <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[2]); const auto number_of_iterations = (argc == 4) ? 
std::atoi(argv[3]) : 300; std::vector<float> h_x; std::vector<float> h_y; std::ifstream stream(argv[1]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_x.push_back(x); h_y.push_back(y); } const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); std::mt19937 rng(std::random_device{}()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); Data d_sums(k); int* d_counts; hipMalloc(&d_counts, k * sizeof(int)); hipMemset(d_counts, 0, k * sizeof(int)); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; const int shared_memory = d_means.bytes * 2; std::cerr << "Processing " << number_of_elements << " points on " << blocks << " blocks x " << threads << " threads" << std::endl; const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), shared_memory, 0, d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_new_means_and_reset), dim3(1), dim3(k), 0, 0, d_means.x, d_means.y, d_sums.x, d_sums.y, d_counts); hipDeviceSynchronize(); } const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cerr << "Took: " << duration.count() << "s" << std::endl; hipFree(d_counts); std::vector<float> mean_x(k, 0); std::vector<float> mean_y(k, 0); hipMemcpy(mean_x.data(), d_means.x, d_means.bytes, hipMemcpyDeviceToHost); hipMemcpy(mean_y.data(), d_means.y, d_means.bytes, hipMemcpyDeviceToHost); for (size_t cluster = 0; cluster < k; ++cluster) { std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl; } }
b8b088297b3c1fe8b155872152edbb9cdb8a53d7.cu
#include <algorithm> #include <cfloat> #include <chrono> #include <fstream> #include <iostream> #include <random> #include <sstream> #include <stdexcept> #include <vector> struct Data { Data(int size) : size(size), bytes(size * sizeof(float)) { cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); cudaMemset(x, 0, bytes); cudaMemset(y, 0, bytes); } Data(int size, std::vector<float>& h_x, std::vector<float>& h_y) : size(size), bytes(size * sizeof(float)) { cudaMalloc(&x, bytes); cudaMalloc(&y, bytes); cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice); } ~Data() { cudaFree(x); cudaFree(y); } float* x{nullptr}; float* y{nullptr}; int size{0}; int bytes{0}; }; __device__ float squared_l2_distance(float x_1, float y_1, float x_2, float y_2) { return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2); } __global__ void assign_clusters(const float* __restrict__ data_x, const float* __restrict__ data_y, int data_size, const float* __restrict__ means_x, const float* __restrict__ means_y, float* __restrict__ new_sums_x, float* __restrict__ new_sums_y, int k, int* __restrict__ counts) { extern __shared__ float shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; if (threadIdx.x < k) { shared_means[threadIdx.x] = means_x[threadIdx.x]; shared_means[k + threadIdx.x] = means_y[threadIdx.x]; } __syncthreads(); // Make global loads once. 
const float x = data_x[index]; const float y = data_y[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = squared_l2_distance(x, y, shared_means[cluster], shared_means[k + cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(&new_sums_x[best_cluster], x); atomicAdd(&new_sums_y[best_cluster], y); atomicAdd(&counts[best_cluster], 1); } __global__ void compute_new_means_and_reset(float* __restrict__ means_x, float* __restrict__ means_y, float* __restrict__ new_sum_x, float* __restrict__ new_sum_y, int* __restrict__ counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means_x[cluster] = new_sum_x[cluster] / count; means_y[cluster] = new_sum_y[cluster] / count; new_sum_y[cluster] = 0; new_sum_x[cluster] = 0; counts[cluster] = 0; } int main(int argc, const char* argv[]) { if (argc < 3) { std::cerr << "usage: assign_clusters <data-file> <k> [iterations]" << std::endl; std::exit(EXIT_FAILURE); } const auto k = std::atoi(argv[2]); const auto number_of_iterations = (argc == 4) ? 
std::atoi(argv[3]) : 300; std::vector<float> h_x; std::vector<float> h_y; std::ifstream stream(argv[1]); std::string line; while (std::getline(stream, line)) { std::istringstream line_stream(line); float x, y; uint16_t label; line_stream >> x >> y >> label; h_x.push_back(x); h_y.push_back(y); } const size_t number_of_elements = h_x.size(); Data d_data(number_of_elements, h_x, h_y); std::mt19937 rng(std::random_device{}()); std::shuffle(h_x.begin(), h_x.end(), rng); std::shuffle(h_y.begin(), h_y.end(), rng); Data d_means(k, h_x, h_y); Data d_sums(k); int* d_counts; cudaMalloc(&d_counts, k * sizeof(int)); cudaMemset(d_counts, 0, k * sizeof(int)); const int threads = 1024; const int blocks = (number_of_elements + threads - 1) / threads; const int shared_memory = d_means.bytes * 2; std::cerr << "Processing " << number_of_elements << " points on " << blocks << " blocks x " << threads << " threads" << std::endl; const auto start = std::chrono::high_resolution_clock::now(); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { assign_clusters<<<blocks, threads, shared_memory>>>(d_data.x, d_data.y, d_data.size, d_means.x, d_means.y, d_sums.x, d_sums.y, k, d_counts); cudaDeviceSynchronize(); compute_new_means_and_reset<<<1, k>>>(d_means.x, d_means.y, d_sums.x, d_sums.y, d_counts); cudaDeviceSynchronize(); } const auto end = std::chrono::high_resolution_clock::now(); const auto duration = std::chrono::duration_cast<std::chrono::duration<float>>(end - start); std::cerr << "Took: " << duration.count() << "s" << std::endl; cudaFree(d_counts); std::vector<float> mean_x(k, 0); std::vector<float> mean_y(k, 0); cudaMemcpy(mean_x.data(), d_means.x, d_means.bytes, cudaMemcpyDeviceToHost); cudaMemcpy(mean_y.data(), d_means.y, d_means.bytes, cudaMemcpyDeviceToHost); for (size_t cluster = 0; cluster < k; ++cluster) { std::cout << mean_x[cluster] << " " << mean_y[cluster] << std::endl; } }
c59c04c3005df1c65a92e1fa8876aec07159c6c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* * Host function to initialize vector elements. This function * simply initializes each element to equal its index in the * vector. */ void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } /* * Device kernel stores into `result` the sum of each * same-indexed value of `a` and `b`. */ __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } /* * Host function to confirm values in `vector`. This function * assumes all values are the same `target` value. */ void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { const int N = 2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); size_t threadsPerBlock; size_t numberOfBlocks; /* * nsys should register performance changes when execution configuration * is updated. */ threadsPerBlock = 128; numberOfBlocks = 24; hipError_t addVectorsErr; hipError_t asyncErr; hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N); addVectorsErr = hipGetLastError(); if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr)); asyncErr = hipDeviceSynchronize(); if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); checkElementsAre(7, c, N); hipFree(a); hipFree(b); hipFree(c); }
c59c04c3005df1c65a92e1fa8876aec07159c6c0.cu
#include <stdio.h> /* * Host function to initialize vector elements. This function * simply initializes each element to equal its index in the * vector. */ void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } /* * Device kernel stores into `result` the sum of each * same-indexed value of `a` and `b`. */ __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } /* * Host function to confirm values in `vector`. This function * assumes all values are the same `target` value. */ void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { const int N = 2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); size_t threadsPerBlock; size_t numberOfBlocks; /* * nsys should register performance changes when execution configuration * is updated. */ threadsPerBlock = 128; numberOfBlocks = 24; cudaError_t addVectorsErr; cudaError_t asyncErr; addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N); addVectorsErr = cudaGetLastError(); if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr)); asyncErr = cudaDeviceSynchronize(); if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); checkElementsAre(7, c, N); cudaFree(a); cudaFree(b); cudaFree(c); }
39c025adfe32d39e41c4ddaa697c23549337c527.hip
// !!! This is a file automatically generated by hipify!!! #include <rocblas.h> #include <algorithm> #include <limits> #include "cml/cml_blas.cuh" #include "cml/cml_linalg.cuh" #include "cml/cml_matrix.cuh" #include "matrix/matrix_dense.h" #include "projector/projector_direct.h" #include "projector_helper.cuh" #include "util.h" namespace pogs { namespace { template<typename T> struct GpuData { T *AA, *L, s; hipblasHandle_t handle; GpuData() : AA(0), L(0), s(static_cast<T>(-1.)) { hipblasCreate(&handle); CUDA_CHECK_ERR(); } ~GpuData() { hipblasDestroy(handle); CUDA_CHECK_ERR(); } }; } // namespace template <typename T, typename M> ProjectorDirect<T, M>::ProjectorDirect(const M& A) : _A(A) { // Set GPU specific this->_info. GpuData<T> *info = new GpuData<T>(); this->_info = reinterpret_cast<void*>(info); } template <typename T, typename M> ProjectorDirect<T, M>::~ProjectorDirect() { GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); if (info->AA) { hipFree(info->AA); info->AA = 0; CUDA_CHECK_ERR(); } if (info->L) { hipFree(info->L); info->L = 0; CUDA_CHECK_ERR(); } delete info; this->_info = 0; } template <typename T, typename M> int ProjectorDirect<T, M>::Init() { if (this->_done_init) return 1; this->_done_init = true; ASSERT(_A.IsInit()); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); size_t min_dim = ::min(_A.Rows(), _A.Cols()); hipMalloc(&(info->AA), min_dim * min_dim * sizeof(T)); hipMalloc(&(info->L), min_dim * min_dim * sizeof(T)); hipMemset(info->AA, 0, min_dim * min_dim * sizeof(T)); hipMemset(info->L, 0, min_dim * min_dim * sizeof(T)); CUDA_CHECK_ERR(); hipblasOperation_t op_type = _A.Rows() > _A.Cols() ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; // Compute AA if (_A.Order() == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); cml::blas_syrk(info->handle, HIPBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } else { const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::blas_syrk(info->handle, HIPBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } CUDA_CHECK_ERR(); return 0; } template <typename T, typename M> int ProjectorDirect<T, M>::Project(const T *x0, const T *y0, T s, T *x, T *y, T tol) { DEBUG_EXPECT(this->_done_init); if (!this->_done_init || s < static_cast<T>(0.)) return 1; // Get Cublas handle GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); hipblasHandle_t hdl = info->handle; size_t min_dim = ::min(_A.Rows(), _A.Cols()); // Set up views for raw vectors. cml::vector<T> y_vec = cml::vector_view_array(y, _A.Rows()); const cml::vector<T> y0_vec = cml::vector_view_array(y0, _A.Rows()); cml::vector<T> x_vec = cml::vector_view_array(x, _A.Cols()); const cml::vector<T> x0_vec = cml::vector_view_array(x0, _A.Cols()); // Set (x, y) = (x0, y0). 
cml::vector_memcpy(&x_vec, &x0_vec); cml::vector_memcpy(&y_vec, &y0_vec); CUDA_CHECK_ERR(); if (_A.Order() == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasRowMajor> L = cml::matrix_view_array<T, CblasRowMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); if (s != info->s) { cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); cml::vector_add_constant(&diagL, s); hipDeviceSynchronize(); CUDA_CHECK_ERR(); cml::linalg_cholesky_decomp(hdl, &L); hipDeviceSynchronize(); CUDA_CHECK_ERR(); } if (_A.Rows() > _A.Cols()) { cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::linalg_cholesky_svx(hdl, &L, &x_vec); cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); } else { cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); cml::linalg_cholesky_svx(hdl, &L, &y_vec); cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); } hipDeviceSynchronize(); CUDA_CHECK_ERR(); } else { const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasColMajor> L = cml::matrix_view_array<T, CblasColMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); if (s != info->s) { cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); cml::vector_add_constant(&diagL, s); hipDeviceSynchronize(); CUDA_CHECK_ERR(); cml::linalg_cholesky_decomp(hdl, &L); hipDeviceSynchronize(); CUDA_CHECK_ERR(); } if (_A.Rows() > _A.Cols()) { 
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::linalg_cholesky_svx(hdl, &L, &x_vec); cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); } else { cml::blas_gemv(hdl, HIPBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); cml::linalg_cholesky_svx(hdl, &L, &y_vec); cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); } hipDeviceSynchronize(); CUDA_CHECK_ERR(); } #ifdef DEBUG // Verify that projection was successful. CheckProjection(&_A, x0, y0, x, y, s, static_cast<T>(1e3) * std::numeric_limits<T>::epsilon()); #endif info->s = s; return 0; } template class ProjectorDirect<double, MatrixDense<double> >; template class ProjectorDirect<float, MatrixDense<float> >; } // namespace pogs
39c025adfe32d39e41c4ddaa697c23549337c527.cu
#include <cublas_v2.h> #include <algorithm> #include <limits> #include "cml/cml_blas.cuh" #include "cml/cml_linalg.cuh" #include "cml/cml_matrix.cuh" #include "matrix/matrix_dense.h" #include "projector/projector_direct.h" #include "projector_helper.cuh" #include "util.h" namespace pogs { namespace { template<typename T> struct GpuData { T *AA, *L, s; cublasHandle_t handle; GpuData() : AA(0), L(0), s(static_cast<T>(-1.)) { cublasCreate(&handle); CUDA_CHECK_ERR(); } ~GpuData() { cublasDestroy(handle); CUDA_CHECK_ERR(); } }; } // namespace template <typename T, typename M> ProjectorDirect<T, M>::ProjectorDirect(const M& A) : _A(A) { // Set GPU specific this->_info. GpuData<T> *info = new GpuData<T>(); this->_info = reinterpret_cast<void*>(info); } template <typename T, typename M> ProjectorDirect<T, M>::~ProjectorDirect() { GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); if (info->AA) { cudaFree(info->AA); info->AA = 0; CUDA_CHECK_ERR(); } if (info->L) { cudaFree(info->L); info->L = 0; CUDA_CHECK_ERR(); } delete info; this->_info = 0; } template <typename T, typename M> int ProjectorDirect<T, M>::Init() { if (this->_done_init) return 1; this->_done_init = true; ASSERT(_A.IsInit()); GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); size_t min_dim = std::min(_A.Rows(), _A.Cols()); cudaMalloc(&(info->AA), min_dim * min_dim * sizeof(T)); cudaMalloc(&(info->L), min_dim * min_dim * sizeof(T)); cudaMemset(info->AA, 0, min_dim * min_dim * sizeof(T)); cudaMemset(info->L, 0, min_dim * min_dim * sizeof(T)); CUDA_CHECK_ERR(); cublasOperation_t op_type = _A.Rows() > _A.Cols() ? 
CUBLAS_OP_T : CUBLAS_OP_N; // Compute AA if (_A.Order() == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } else { const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::blas_syrk(info->handle, CUBLAS_FILL_MODE_LOWER, op_type, static_cast<T>(1.), &A, static_cast<T>(0.), &AA); } CUDA_CHECK_ERR(); return 0; } template <typename T, typename M> int ProjectorDirect<T, M>::Project(const T *x0, const T *y0, T s, T *x, T *y, T tol) { DEBUG_EXPECT(this->_done_init); if (!this->_done_init || s < static_cast<T>(0.)) return 1; // Get Cublas handle GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); cublasHandle_t hdl = info->handle; size_t min_dim = std::min(_A.Rows(), _A.Cols()); // Set up views for raw vectors. cml::vector<T> y_vec = cml::vector_view_array(y, _A.Rows()); const cml::vector<T> y0_vec = cml::vector_view_array(y0, _A.Rows()); cml::vector<T> x_vec = cml::vector_view_array(x, _A.Cols()); const cml::vector<T> x0_vec = cml::vector_view_array(x0, _A.Cols()); // Set (x, y) = (x0, y0). 
cml::vector_memcpy(&x_vec, &x0_vec); cml::vector_memcpy(&y_vec, &y0_vec); CUDA_CHECK_ERR(); if (_A.Order() == MatrixDense<T>::ROW) { const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasRowMajor> AA = cml::matrix_view_array<T, CblasRowMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasRowMajor> L = cml::matrix_view_array<T, CblasRowMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); if (s != info->s) { cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); cml::vector_add_constant(&diagL, s); cudaDeviceSynchronize(); CUDA_CHECK_ERR(); cml::linalg_cholesky_decomp(hdl, &L); cudaDeviceSynchronize(); CUDA_CHECK_ERR(); } if (_A.Rows() > _A.Cols()) { cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::linalg_cholesky_svx(hdl, &L, &x_vec); cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); } else { cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); cml::linalg_cholesky_svx(hdl, &L, &y_vec); cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); } cudaDeviceSynchronize(); CUDA_CHECK_ERR(); } else { const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor> (_A.Data(), _A.Rows(), _A.Cols()); cml::matrix<T, CblasColMajor> AA = cml::matrix_view_array<T, CblasColMajor> (info->AA, min_dim, min_dim); cml::matrix<T, CblasColMajor> L = cml::matrix_view_array<T, CblasColMajor> (info->L, min_dim, min_dim); CUDA_CHECK_ERR(); if (s != info->s) { cml::matrix_memcpy(&L, &AA); cml::vector<T> diagL = cml::matrix_diagonal(&L); cml::vector_add_constant(&diagL, s); cudaDeviceSynchronize(); CUDA_CHECK_ERR(); cml::linalg_cholesky_decomp(hdl, &L); cudaDeviceSynchronize(); CUDA_CHECK_ERR(); } if (_A.Rows() > _A.Cols()) { 
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::linalg_cholesky_svx(hdl, &L, &x_vec); cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(0.), &y_vec); } else { cml::blas_gemv(hdl, CUBLAS_OP_N, static_cast<T>(1.), &A, &x_vec, static_cast<T>(-1.), &y_vec); cml::linalg_cholesky_svx(hdl, &L, &y_vec); cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(-1.), &A, &y_vec, static_cast<T>(1.), &x_vec); cml::blas_axpy(hdl, static_cast<T>(1.), &y0_vec, &y_vec); } cudaDeviceSynchronize(); CUDA_CHECK_ERR(); } #ifdef DEBUG // Verify that projection was successful. CheckProjection(&_A, x0, y0, x, y, s, static_cast<T>(1e3) * std::numeric_limits<T>::epsilon()); #endif info->s = s; return 0; } template class ProjectorDirect<double, MatrixDense<double> >; template class ProjectorDirect<float, MatrixDense<float> >; } // namespace pogs
25d4c020841ad777a2805ae3d21340e424a383b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "softmax_loss_ohem_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossOHEMLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } if (top.size() >= 3) { // Output per-instance loss caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data()); } // Fix a bug, which happens when propagate_down[0] = false in backward caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { 
LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossOHEMLayer); } // namespace caffe
25d4c020841ad777a2805ae3d21340e424a383b8.cu
#include <algorithm> #include <cfloat> #include <vector> #include "softmax_loss_ohem_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossOHEMLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } if (top.size() >= 3) { // Output per-instance loss caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data, top[2]->mutable_gpu_data()); } // Fix a bug, which happens when propagate_down[0] = false in backward caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer 
cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossOHEMLayer); } // namespace caffe
981f8baf5a299be5baeeeb90e667526bd32c4004.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include "commonFunction.h" #include "TestFunction.h" #define ARRAY_SIZE 49 #include<cmath> using namespace std; __global__ void calculateSimilarity2(float* c, const float *a, const int NA, const int NB, const int NMax); hipError_t calculateSimilarityWithCuda2(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName); /** int main() { const int NA = 7; const int NB = 7; const int NMax = 1; hipError_t cudaStatus; float A[NA*NB*NMax] = {4}; float C[NMax]; cudaStatus = calculateSimilarityWithCuda(C, A,NA,NB, NMax, "../calculateSimilarityTimeResult/calculateSimilarity2.txt"); //print out C for correctness checking printf("C[] array is %.2f\n", C[0]); testFunction(A, NA, NB, NMax, 1000,10000,1000, "../calculateSimilarityTimeResult/calculateSimilarity2.txt", &calculateSimilarityWithCuda2); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } return 0; } **/ /** Algorithm: (1) Sort the elements of the atom match matri into order of decreasing similiarity (not necessary because we will need to find max anyway) (2) Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) (3) Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] (4) Remove A(i) and B(j) from further consideration (5) Return to step 2 if it is possible to map further atoms in A to atoms in B input: array of float c: containing result of total of NA max_element over NA array of float a: containing coordinates NA*NB*NMax elements to find max_element const int NA: number of atoms in molecule A const int NB: number of atoms in each molecule in B const int NMax: number of molecules in B output: void **/ __global__ void calculateSimilarity2(float* c, const float *a, const int NA, const 
int NB, const int NMax){ float temp[ARRAY_SIZE]; float total; int position; int tid= blockIdx.x*blockDim.x+threadIdx.x; // Each thread work on comparing 1 molecule of A to 1 molecule of B // If we have NMax molecule B, we need NMax threads if (tid < NMax) { // Copy the appropriate part of a big array into a small one. for (int q = 0; q<NA*NB; q++) { temp[q] = a[tid*NA*NB + q]; } // Initialised each thread's total to 0 total = 0; //loop through NA atoms of molecule A for (int k =0;k<NA; k++) { /** Step 2: Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) **/ // Find the max_element and position of max_element in the array of NA*NB float position = 0; float max = temp[0]; for (int t = 0; t<NA*NB; t++) { if (temp[t] > max) { max = temp[t]; position=t; } } /** Step 3: Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] **/ // Sum the max into total total = total + max; // Get the position of max_element in 2D array int a = position/NB; //y axis int b = position%NB; // x axis /** Step 4: Remove A(i) and B(j) from further consideration **/ // Set all the elements in the same row and column of max_element to 0 // set all elements in the same y axis of max = 0 for (int i =0; i<NB; i++ ) temp[a*NB+i] =0; // set all elements in the same x axis of max = 0 for (int j =0; j<NA; j++) temp[j*NB+b] =0; } //The similiarity score is total/NA c[tid] = total /NA; } } // Helper function for using CUDA to add vectors in parallel. hipError_t calculateSimilarityWithCuda2(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName) { float *dev_a = 0; float *dev_c = 0; hipError_t cudaStatus; hipEvent_t start, stop; float milliseconds; cudaStatus = hipEventCreate(&start); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventCreate(& start) failed! 
in scanWithCuda\n"); goto Error; } cudaStatus = hipEventCreate(&stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventCreate(& stop) failed! in scanWithCuda\n"); goto Error; } //Start recording time cudaStatus = hipEventRecord(start); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, NMax*sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! for dev_c in scanWithCuda\n"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed! for dev_a in scanWithCuda\n"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for dev_a! in scanWithCuda"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( calculateSimilarity2), dim3(NMax/1024 +1), dim3(1024), 0, 0, dev_c, dev_a, NA, NB, NMax); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! 
in scanWithCuda\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, NMax*sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed for dev_c! in scanWithCuda`\n"); goto Error; } cudaStatus = hipEventRecord(stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } cudaStatus = hipEventElapsedTime(&milliseconds, start, stop); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipEventElapsedTime failed! in scanWithCuda\n"); goto Error; } printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds); writeResult2File (NA, NB, NMax, milliseconds, "milliseconds", fileName); Error: hipFree(dev_c); hipFree(dev_a); return cudaStatus; }
981f8baf5a299be5baeeeb90e667526bd32c4004.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <algorithm> #include <stdio.h> #include "commonFunction.h" #include "TestFunction.h" #define ARRAY_SIZE 49 #include<cmath> using namespace std; __global__ void calculateSimilarity2(float* c, const float *a, const int NA, const int NB, const int NMax); cudaError_t calculateSimilarityWithCuda2(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName); /** int main() { const int NA = 7; const int NB = 7; const int NMax = 1; cudaError_t cudaStatus; float A[NA*NB*NMax] = {4}; float C[NMax]; cudaStatus = calculateSimilarityWithCuda(C, A,NA,NB, NMax, "../calculateSimilarityTimeResult/calculateSimilarity2.txt"); //print out C for correctness checking printf("C[] array is %.2f\n", C[0]); testFunction(A, NA, NB, NMax, 1000,10000,1000, "../calculateSimilarityTimeResult/calculateSimilarity2.txt", &calculateSimilarityWithCuda2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } return 0; } **/ /** Algorithm: (1) Sort the elements of the atom match matri into order of decreasing similiarity (not necessary because we will need to find max anyway) (2) Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) (3) Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] (4) Remove A(i) and B(j) from further consideration (5) Return to step 2 if it is possible to map further atoms in A to atoms in B input: array of float c: containing result of total of NA max_element over NA array of float a: containing coordinates NA*NB*NMax elements to find max_element const int NA: number of atoms in molecule A const int NB: number of atoms in each molecule in B const int NMax: number of molecules in B output: void **/ __global__ void calculateSimilarity2(float* c, const float *a, const int NA, const int NB, const int NMax){ float temp[ARRAY_SIZE]; float 
total; int position; int tid= blockIdx.x*blockDim.x+threadIdx.x; // Each thread work on comparing 1 molecule of A to 1 molecule of B // If we have NMax molecule B, we need NMax threads if (tid < NMax) { // Copy the appropriate part of a big array into a small one. for (int q = 0; q<NA*NB; q++) { temp[q] = a[tid*NA*NB + q]; } // Initialised each thread's total to 0 total = 0; //loop through NA atoms of molecule A for (int k =0;k<NA; k++) { /** Step 2: Scan the atom match matrix to find the remaining pair of atoms, one from A and one from B, that has the largest calculated value for S(i,j) **/ // Find the max_element and position of max_element in the array of NA*NB float position = 0; float max = temp[0]; for (int t = 0; t<NA*NB; t++) { if (temp[t] > max) { max = temp[t]; position=t; } } /** Step 3: Store the rsulting equivalences as a tuple of the form [A(i) <-> B(j); S9i,j)] **/ // Sum the max into total total = total + max; // Get the position of max_element in 2D array int a = position/NB; //y axis int b = position%NB; // x axis /** Step 4: Remove A(i) and B(j) from further consideration **/ // Set all the elements in the same row and column of max_element to 0 // set all elements in the same y axis of max = 0 for (int i =0; i<NB; i++ ) temp[a*NB+i] =0; // set all elements in the same x axis of max = 0 for (int j =0; j<NA; j++) temp[j*NB+b] =0; } //The similiarity score is total/NA c[tid] = total /NA; } } // Helper function for using CUDA to add vectors in parallel. cudaError_t calculateSimilarityWithCuda2(float* c, const float *a, const int NA, const int NB, const int NMax, string fileName) { float *dev_a = 0; float *dev_c = 0; cudaError_t cudaStatus; cudaEvent_t start, stop; float milliseconds; cudaStatus = cudaEventCreate(&start); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventCreate(& start) failed! 
in scanWithCuda\n"); goto Error; } cudaStatus = cudaEventCreate(&stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventCreate(& stop) failed! in scanWithCuda\n"); goto Error; } //Start recording time cudaStatus = cudaEventRecord(start); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, NMax*sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! for dev_c in scanWithCuda\n"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed! for dev_a in scanWithCuda\n"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for dev_a! in scanWithCuda"); goto Error; } // Launch a kernel on the GPU with one thread for each element. calculateSimilarity2<<<NMax/1024 +1, 1024>>>(dev_c, dev_a, NA, NB, NMax); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! 
in scanWithCuda\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, NMax*sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed for dev_c! in scanWithCuda`\n"); goto Error; } cudaStatus = cudaEventRecord(stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n"); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus); goto Error; } cudaStatus = cudaEventElapsedTime(&milliseconds, start, stop); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaEventElapsedTime failed! in scanWithCuda\n"); goto Error; } printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds); writeResult2File (NA, NB, NMax, milliseconds, "milliseconds", fileName); Error: cudaFree(dev_c); cudaFree(dev_a); return cudaStatus; }
5393dec7c5f80dd97166847af3dd675b4f20ca65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_out_frame( const scalar_t* input, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* output) { size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; float scale_factor = (float)src_dim_w / (float)dst_dim_w; int c = (dst_idx / dst_dim_w) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w); size_t src_idx = c * src_dim_w + src_x; int src_stride = dim_c * src_dim_w; int dst_stride = dim_c * dst_dim_w; for (int b = 0; b < dim_b; b++) { output[dst_idx] = input[src_idx]; src_idx += src_stride; dst_idx += dst_stride; } } // Backward operation template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* grad_i) { size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; float scale_factor = (float)src_dim_w / (float)dst_dim_w; int c = (dst_idx / (dst_dim_w)) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w); int src_x_up = nearest_neighbor_compute_source_index(scale_factor, dst_x+1, src_dim_w+1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; size_t src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x; for (int x = src_x; x < src_x_up; x++) { grad += 
grad_o[src_idx++]; } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_dim_w; } } static void upsample_nearest1d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input_.size(0); int channels = input_.size(1); int input_width = input_.size(2); upsample_1d_shape_check( input_, Tensor(), nbatch, channels, input_width, output_width); AT_ASSERT(input_width > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({input.size(0), input.size(1), output_width}); // upsample_1d_shape_check makes sure `nbatch != 0` unsigned int n = output.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[0], cuda::ATenCeilDiv(n, bdim.x))}; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_nearest1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( upsample_nearest1d_out_frame<scalar_t>), dim3(gdim), dim3(bdim), 0, stream, idata, nbatch, channels, input_width, output_width, odata); }); AT_CUDA_CHECK(hipGetLastError()); } static void upsample_nearest1d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest1d_backward_out_cuda_template", {grad_output_arg, 
grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); // upsample_1d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[0], cuda::ATenCeilDiv(n, bdim.x))}; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t>) , dim3(gdim), dim3(bdim), 0, stream, odata, nbatch, channels, output_width, input_width, idata); }); AT_CUDA_CHECK(hipGetLastError()); } } // namespace Tensor& upsample_nearest1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { upsample_nearest1d_out_cuda_template(output, input, output_size); return output; } Tensor upsample_nearest1d_cuda(const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty_like(input); upsample_nearest1d_out_cuda_template(output, input, output_size); return output; } Tensor& upsample_nearest1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { 
upsample_nearest1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } Tensor upsample_nearest1d_backward_cuda( const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { Tensor grad_input = at::empty_like(grad_output); upsample_nearest1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } } // namespace native } // namespace at
5393dec7c5f80dd97166847af3dd675b4f20ca65.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/UpSample.cuh> namespace at { namespace native { namespace { #define MAX_THREADS 512 template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_out_frame( const scalar_t* input, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* output) { size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; float scale_factor = (float)src_dim_w / (float)dst_dim_w; int c = (dst_idx / dst_dim_w) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w); size_t src_idx = c * src_dim_w + src_x; int src_stride = dim_c * src_dim_w; int dst_stride = dim_c * dst_dim_w; for (int b = 0; b < dim_b; b++) { output[dst_idx] = input[src_idx]; src_idx += src_stride; dst_idx += dst_stride; } } // Backward operation template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void upsample_nearest1d_backward_out_frame( const scalar_t* grad_o, size_t dim_b, size_t dim_c, size_t src_dim_w, size_t dst_dim_w, scalar_t* grad_i) { size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x; if (dst_idx >= dim_c * dst_dim_w) return; float scale_factor = (float)src_dim_w / (float)dst_dim_w; int c = (dst_idx / (dst_dim_w)) % dim_c; int dst_x = dst_idx % dst_dim_w; int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w); int src_x_up = nearest_neighbor_compute_source_index(scale_factor, dst_x+1, src_dim_w+1); for (int b = 0; b < dim_b; b++) { accscalar_t grad = 0; size_t src_idx = b * dim_c * src_dim_w + c * src_dim_w + src_x; for (int x = src_x; x < src_x_up; x++) { grad += grad_o[src_idx++]; } grad_i[dst_idx] = grad; dst_idx += dim_c * dst_dim_w; } } static 
void upsample_nearest1d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef output_size) { TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2}; checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); int output_width = output_size[0]; int nbatch = input_.size(0); int channels = input_.size(1); int input_width = input_.size(2); upsample_1d_shape_check( input_, Tensor(), nbatch, channels, input_width, output_width); AT_ASSERT(input_width > 0 && output_width > 0); Tensor input = input_.contiguous(); output.resize_({input.size(0), input.size(1), output_width}); // upsample_1d_shape_check makes sure `nbatch != 0` unsigned int n = output.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[0], cuda::ATenCeilDiv(n, bdim.x))}; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "upsample_nearest1d_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = input.data_ptr<scalar_t>(); auto odata = output.data_ptr<scalar_t>(); upsample_nearest1d_out_frame<scalar_t><<<gdim, bdim, 0, stream>>>( idata, nbatch, channels, input_width, output_width, odata); }); AT_CUDA_CHECK(cudaGetLastError()); } static void upsample_nearest1d_backward_out_cuda_template( Tensor& grad_input, const Tensor& grad_output_, IntArrayRef output_size, IntArrayRef input_size) { TensorArg grad_input_arg{grad_input, "grad_input", 1}, grad_output_arg{grad_output_, "grad_output_", 2}; checkAllSameGPU( "upsample_nearest1d_backward_out_cuda_template", {grad_output_arg, grad_input_arg}); TORCH_CHECK( output_size.size() == 1, "It is expected output_size equals to 1, but got size ", output_size.size()); 
TORCH_CHECK( input_size.size() == 3, "It is expected input_size equals to 3, but got size ", input_size.size()); int output_width = output_size[0]; int nbatch = input_size[0]; int channels = input_size[1]; int input_width = input_size[2]; upsample_1d_shape_check( Tensor(), grad_output_, nbatch, channels, input_width, output_width); Tensor grad_output = grad_output_.contiguous(); grad_input.resize_({nbatch, channels, input_width}); // upsample_1d_shape_check makes sure `nbatch != 0` unsigned int n = grad_input.numel() / nbatch; dim3 bdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)}; dim3 gdim{std::min<unsigned int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[0], cuda::ATenCeilDiv(n, bdim.x))}; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.scalar_type(), "upsample_nearest1d_backward_out_frame", [&] { using accscalar_t = at::acc_type<scalar_t, true>; auto idata = grad_input.data_ptr<scalar_t>(); auto odata = grad_output.data_ptr<scalar_t>(); upsample_nearest1d_backward_out_frame<scalar_t, accscalar_t> <<<gdim, bdim, 0, stream>>>( odata, nbatch, channels, output_width, input_width, idata); }); AT_CUDA_CHECK(cudaGetLastError()); } } // namespace Tensor& upsample_nearest1d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef output_size) { upsample_nearest1d_out_cuda_template(output, input, output_size); return output; } Tensor upsample_nearest1d_cuda(const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty_like(input); upsample_nearest1d_out_cuda_template(output, input, output_size); return output; } Tensor& upsample_nearest1d_backward_out_cuda( Tensor& grad_input, const Tensor& grad_output, IntArrayRef output_size, IntArrayRef input_size) { upsample_nearest1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } Tensor upsample_nearest1d_backward_cuda( const Tensor& grad_output, 
IntArrayRef output_size, IntArrayRef input_size) { Tensor grad_input = at::empty_like(grad_output); upsample_nearest1d_backward_out_cuda_template( grad_input, grad_output, output_size, input_size); return grad_input; } } // namespace native } // namespace at
f33c10697114bd3357524cb37b73fde954e90342.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "advectParticles_OGL.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float2 *part = NULL; hipMalloc(&part, XSIZE*YSIZE); float2 *v = NULL; hipMalloc(&v, XSIZE*YSIZE); int dx = 1; int dy = 1; float dt = 1; int lb = 1; size_t pitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( advectParticles_OGL), dim3(gridBlock),dim3(threadBlock), 0, 0, part,v,dx,dy,dt,lb,pitch); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( advectParticles_OGL), dim3(gridBlock),dim3(threadBlock), 0, 0, part,v,dx,dy,dt,lb,pitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( advectParticles_OGL), dim3(gridBlock),dim3(threadBlock), 0, 0, part,v,dx,dy,dt,lb,pitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f33c10697114bd3357524cb37b73fde954e90342.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "advectParticles_OGL.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float2 *part = NULL; cudaMalloc(&part, XSIZE*YSIZE); float2 *v = NULL; cudaMalloc(&v, XSIZE*YSIZE); int dx = 1; int dy = 1; float dt = 1; int lb = 1; size_t pitch = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); advectParticles_OGL<<<gridBlock,threadBlock>>>(part,v,dx,dy,dt,lb,pitch); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { advectParticles_OGL<<<gridBlock,threadBlock>>>(part,v,dx,dy,dt,lb,pitch); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { advectParticles_OGL<<<gridBlock,threadBlock>>>(part,v,dx,dy,dt,lb,pitch); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a28f919bcf8649d0f9f6ac564ea6c17e7a369e69.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #define LIST_SIZE_GLOBAL 3000000 #define LIST_SIZE 10000 extern "C" __device__ unsigned long long load_store_index[LIST_SIZE]; extern "C" __device__ unsigned long long load_store_address[LIST_SIZE]; extern "C" __device__ unsigned long long load_store_check[LIST_SIZE]; extern "C" __device__ unsigned long long record_flag; extern "C" __device__ unsigned long long call_count; int memPro_kernel = 0; void bambooLogRecordOff(){ long long local_record = 0; hipMemcpyToSymbol(record_flag, &local_record, sizeof(long long), 0, hipMemcpyHostToDevice); } void bambooLogKernelBegin(long long i) { hipMemcpyToSymbol(call_count, &i, sizeof(long long), 0, hipMemcpyHostToDevice); i = 1; hipMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, hipMemcpyHostToDevice); } void bambooLogKernelEnd() { unsigned long long loadStoreIndex[LIST_SIZE] = {0}; unsigned long long loadStoreAddress[LIST_SIZE] = {0}; unsigned long long loadStoreCheck[LIST_SIZE] = {0}; FILE *profileFile = fopen("profile_mem_result.txt", "a"); for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE) { hipMemcpyFromSymbol(loadStoreIndex, load_store_index, LIST_SIZE * sizeof(long long), j*sizeof(long long), hipMemcpyDeviceToHost); hipMemcpyFromSymbol(loadStoreAddress, load_store_address, LIST_SIZE * sizeof(long long), j*sizeof(long long), hipMemcpyDeviceToHost); hipMemcpyFromSymbol(loadStoreCheck, load_store_check, LIST_SIZE * sizeof(long long), j*sizeof(long long), hipMemcpyDeviceToHost); for(long long i=0; i < LIST_SIZE; i++) { if(loadStoreIndex[i] != 0) { if (loadStoreCheck[i] == 0) { fprintf(profileFile, "L %lld %p\n", loadStoreIndex[i], (void*)loadStoreAddress[i]); } else { fprintf(profileFile, "S %lld %p\n", loadStoreIndex[i], (void*)loadStoreAddress[i]); } } } } 
fclose(profileFile); }
a28f919bcf8649d0f9f6ac564ea6c17e7a369e69.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda.h> #include <device_launch_parameters.h> #define LIST_SIZE_GLOBAL 3000000 #define LIST_SIZE 10000 extern "C" __device__ unsigned long long load_store_index[LIST_SIZE]; extern "C" __device__ unsigned long long load_store_address[LIST_SIZE]; extern "C" __device__ unsigned long long load_store_check[LIST_SIZE]; extern "C" __device__ unsigned long long record_flag; extern "C" __device__ unsigned long long call_count; int memPro_kernel = 0; void bambooLogRecordOff(){ long long local_record = 0; cudaMemcpyToSymbol(record_flag, &local_record, sizeof(long long), 0, cudaMemcpyHostToDevice); } void bambooLogKernelBegin(long long i) { cudaMemcpyToSymbol(call_count, &i, sizeof(long long), 0, cudaMemcpyHostToDevice); i = 1; cudaMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, cudaMemcpyHostToDevice); } void bambooLogKernelEnd() { unsigned long long loadStoreIndex[LIST_SIZE] = {0}; unsigned long long loadStoreAddress[LIST_SIZE] = {0}; unsigned long long loadStoreCheck[LIST_SIZE] = {0}; FILE *profileFile = fopen("profile_mem_result.txt", "a"); for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE) { cudaMemcpyFromSymbol(loadStoreIndex, load_store_index, LIST_SIZE * sizeof(long long), j*sizeof(long long), cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(loadStoreAddress, load_store_address, LIST_SIZE * sizeof(long long), j*sizeof(long long), cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(loadStoreCheck, load_store_check, LIST_SIZE * sizeof(long long), j*sizeof(long long), cudaMemcpyDeviceToHost); for(long long i=0; i < LIST_SIZE; i++) { if(loadStoreIndex[i] != 0) { if (loadStoreCheck[i] == 0) { fprintf(profileFile, "L %lld %p\n", loadStoreIndex[i], (void*)loadStoreAddress[i]); } else { fprintf(profileFile, "S %lld %p\n", loadStoreIndex[i], (void*)loadStoreAddress[i]); } } } } fclose(profileFile); }
9838e56f6f9bfbe6a0cc777c3efa0c834ab85cc5.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 by Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<int> label_correct_; public: RegLossObj() = default; void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { if (info.labels_.Size() == 0U) { LOG(WARNING) << "Label set is empty."; } CHECK_EQ(preds.Size(), info.labels_.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of 
weights should be equal to number of data points."; } auto scale_pos_weight = param_.scale_pos_weight; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = Loss::PredTransform(_preds[_idx]); bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float label = _labels[_idx]; if (label == 1.0f) { w *= scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, 
LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Possion regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> > &args) override {} void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, 
HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels_.HostVector(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels[ind]; const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { 
std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) #pragma omp parallel for schedule(static) for (long j = 0; j < ndata; ++j) { // NOLINT(*) preds[j] = ::exp(preds[j]); } } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> > &args) override {} void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = tparam_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, device) .Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
9838e56f6f9bfbe6a0cc777c3efa0c834ab85cc5.cu
/*! * Copyright 2015-2019 by Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<int> label_correct_; public: RegLossObj() = default; void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { if (info.labels_.Size() == 0U) { LOG(WARNING) << "Label set is empty."; } CHECK_EQ(preds.Size(), info.labels_.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } auto 
scale_pos_weight = param_.scale_pos_weight; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = Loss::PredTransform(_preds[_idx]); bst_float w = is_null_weight ? 1.0f : _weights[_idx]; bst_float label = _labels[_idx]; if (label == 1.0f) { w *= scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") 
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." 
\ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Possion regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> > &args) override {} void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, 
HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels_.HostVector(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels[ind]; const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { 
std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) #pragma omp parallel for schedule(static) for (long j = 0; j < ndata; ++j) { // NOLINT(*) preds[j] = std::exp(preds[j]); } } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> > &args) override {} void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = tparam_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, device).Eval( &label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. 
Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = tparam_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, device) .Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, tparam_->gpu_id) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = toJson(param_); } void LoadConfig(Json const& in) override { fromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
33645b47b720f746a3df7074fa1eb68b7b9eff95.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> void GraphLinkageLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // get the nunmber of samples //int num = bottom[0]->num(); //int count = bottom[0]->count(); //const int channels = bottom[0]->channels(); //const Dtype* features = bottom[0]->cpu_data(); //const Dtype* labels = bottom[1]->cpu_data(); //// compute distance matrix //sigma_ = 0.0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // caffe_sub(channels, // bottom[0]->cpu_data() + (i * channels), // bottom[0]->cpu_data() + (j * channels), // diff_.mutable_cpu_data() + (i * num + j) * channels); // Dtype d_sq = caffe_cpu_dot(channels, // diff_.cpu_data() + (i * num + j) * channels, // diff_.cpu_data() + (i * num + j) * channels); // dist_sq_.mutable_cpu_data()[i * num + j] = d_sq; // sigma_ += d_sq; // } //} //sigma_ /= num * num; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // Dtype d_sq = dist_sq_.cpu_data()[i * num + j]; // // if (d_sq < sigma_ * sigma_) { // if (1) { // affinity_sample_.mutable_cpu_data()[i * num + j] = exp(-d_sq / sigma_); // } // else { // affinity_sample_.mutable_cpu_data()[i * num + j] = 0; // } // } //} //// build label map //std::map<int, std::vector<int> > label_indice_map; //for (int i = 0; i < num; ++i) { // label_indice_map[labels[i]].push_back(i); //} //// convert label map to //vector<int> labels_idx; //vector<vector<int> > label_indice; //for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { // labels_idx.push_back(it->first); // label_indice.push_back(it->second); //} //label_indice_ = label_indice; ////// compute the loss = loss_intra + loss_extra 
//affinity_intra_.Reshape(label_indice.size(), 1, 1, 1); ///// compute intra loss loss_intra //for (int i = 0; i < label_indice.size(); ++i) { // Dtype val(0.0); // //for (int m = 0; m < label_indice[i].size(); ++m) { // // for (int n = 0; n < label_indice[i].size(); ++n) { // // Dtype entry_m_n = affinity_sample_.cpu_data()[label_indice[i][m] * num + // // label_indice[i][n]]; // // Dtype entry_n_m = affinity_sample_.cpu_data()[label_indice[i][n] * num + // // label_indice[i][m]]; // // val += entry_m_n * entry_n_m; // // } // //} // for (int m = 0; m < label_indice[i].size(); ++m) { // for (int n = 0; n < label_indice[i].size(); ++n) { // if (m == n) // continue; // Dtype entry_m_n = affinity_sample_.cpu_data()[label_indice[i][m] * num + // label_indice[i][n]]; // val += entry_m_n; // } // } // affinity_intra_.mutable_cpu_data()[i] = val; // // loss_intra += 1 - val / label_indice[i].size() / label_indice[i].size(); //} ///// compute extra loss loss_extra //affinity_extra_.Reshape(label_indice.size() * label_indice.size(), 1, 1, 1); //for (int i = 0; i < label_indice.size(); ++i) { // for (int j = 0; j < label_indice.size(); ++j) { // if (i == j) { // affinity_extra_.mutable_cpu_data()[j * label_indice.size() + i] = 0; // continue; // } // Dtype A_c_i_j = 0; // //for (int m = 0; m < label_indice[i].size(); ++m) { // // Dtype s_W_c_j_i = 0; // // for (int n = 0; n < label_indice[j].size(); ++n) { // // s_W_c_j_i += affinity_sample_.cpu_data()[label_indice[j][n] * num + // // label_indice[i][m]]; // // // W_samples.at<float>(label_indice[j][n], label_indice[i][m]); // // } // // Dtype s_W_c_i_j = 0; // // for (int n = 0; n < label_indice[j].size(); ++n) { // // s_W_c_i_j += affinity_sample_.cpu_data()[label_indice[i][m] * num + // // label_indice[j][n]]; // // // W_samples.at<float>(label_indice[i][m], label_indice[j][n]); // // } // // A_c_i_j += s_W_c_j_i * s_W_c_i_j; // //} // for (int m = 0; m < label_indice[i].size(); ++m) { // Dtype s_W_c_j_i = 0; // for (int 
n = 0; n < label_indice[j].size(); ++n) { // s_W_c_j_i += affinity_sample_.cpu_data()[label_indice[j][n] * num + // label_indice[i][m]]; // // W_samples.at<float>(label_indice[j][n], label_indice[i][m]); // } // A_c_i_j += s_W_c_j_i; // } // affinity_extra_.mutable_cpu_data()[j * label_indice.size() + i] = A_c_i_j; // } //} //Dtype loss_intra(0.0), loss_extra(0.0); //Dtype loss(0.0); //int num_intra_valid = 0; //for (int i = 0; i < label_indice.size(); ++i) { // if (label_indice[i].size() == 1) // continue; // loss_intra += 1 - affinity_intra_.cpu_data()[i] / label_indice[i].size() / (label_indice[i].size() - 1); // ++num_intra_valid; //} //num_intra_valid_ = num_intra_valid; //for (int i = 0; i < label_indice.size(); ++i) { // for (int j = 0; j < label_indice.size(); ++j) { // loss_extra += affinity_extra_.cpu_data()[i * label_indice.size() + j] // / label_indice[i].size() / label_indice[j].size(); // A_c_i_j // } //} //loss = loss_intra / num_intra_valid + loss_extra / label_indice.size() / label_indice.size(); //top[0]->mutable_cpu_data()[0] = loss; /*===============================================================*/ /* contrastive loss */ /*===============================================================*/ //// get the nunmber of samples //int num = bottom[0]->num(); //int count = bottom[0]->count(); //const int channels = bottom[0]->channels(); //const Dtype* features = bottom[0]->cpu_data(); //const Dtype* labels = bottom[1]->cpu_data(); //// compute distance matrix //sigma_ = 0.0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // //Dtype norm_i = caffe_cpu_dot(channels, // // bottom[0]->cpu_data() + (i * channels), // // bottom[0]->cpu_data() + (i * channels)); // //Dtype norm_j = caffe_cpu_dot(channels, // // bottom[0]->cpu_data() + (j * channels), // // bottom[0]->cpu_data() + (j * channels)); // //caffe_scal(channels, Dtype(1.0) / sqrt(norm_i), bottom[0]->mutable_cpu_data() + (i * channels)); // //caffe_scal(channels, Dtype(1.0) / 
sqrt(norm_j), bottom[0]->mutable_cpu_data() + (j * channels)); // caffe_sub(channels, // bottom[0]->cpu_data() + (i * channels), // bottom[0]->cpu_data() + (j * channels), // diff_.mutable_cpu_data() + (i * num + j) * channels); // Dtype d_sq = caffe_cpu_dot(channels, // diff_.cpu_data() + (i * num + j) * channels, // diff_.cpu_data() + (i * num + j) * channels); // dist_sq_.mutable_cpu_data()[i * num + j] = d_sq; // sigma_ += d_sq; // } //} //sigma_ = sigma_ / (num * num); //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // Dtype d_sq = dist_sq_.cpu_data()[i * num + j]; // // if (d_sq < sigma_ * sigma_) { // if (1) { // affinity_sample_.mutable_cpu_data()[i * num + j] = exp(-d_sq / sigma_); // } // else { // affinity_sample_.mutable_cpu_data()[i * num + j] = 0; // } // } //} //// build label map //std::map<int, std::vector<int> > label_indice_map; //for (int i = 0; i < num; ++i) { // label_indice_map[labels[i]].push_back(i); //} //// convert label map to //vector<int> labels_idx; //vector<vector<int> > label_indice; //for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { // labels_idx.push_back(it->first); // label_indice.push_back(it->second); //} //label_indice_ = label_indice; ////// compute the loss = loss_intra + loss_extra //Dtype loss_intra(0.0), loss_extra(0.0); //Dtype loss(0.0); //int num_intra_valid = 0; //int num_extra_valid = 0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // if (i == j) // continue; // if (labels[i] == labels[j]) { // intra pairs // ++num_intra_valid; // loss_intra += (1 - affinity_sample_.cpu_data()[i * num + j]); // } // else if (labels[i] != labels[j]) { // extra pairs // ++num_extra_valid; // loss_extra += affinity_sample_.cpu_data()[i * num + j] > 0.3 ? 
// affinity_sample_.cpu_data()[i * num + j] - 0.3: 0; // } // } //} //num_intra_valid_ = num_intra_valid; //num_extra_valid_ = num_extra_valid; //if (num_intra_valid == 0) { // loss = loss_extra / num_extra_valid; //} //else { // loss = loss_intra / num_intra_valid + loss_extra / num_extra_valid; //} //top[0]->mutable_cpu_data()[0] = loss; /*======================================================================*/ /* triplet loss */ /*======================================================================*/ // get the nunmber of samples int num = bottom[0]->num(); int count = bottom[0]->count(); const int channels = bottom[0]->channels(); const Dtype* features = bottom[0]->cpu_data(); const Dtype* labels = bottom[1]->cpu_data(); // build label map std::map<int, std::vector<int> > label_indice_map; for (int i = 0; i < num; ++i) { label_indice_map[labels[i]].push_back(i); } // convert label map to vector<int> labels_idx; vector<vector<int> > label_indice; for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { labels_idx.push_back(it->first); label_indice.push_back(it->second); } label_indice_ = label_indice; num_items_ = 0; // compute number of items int num_neg_sampling = label_indice.size() - 1 > 10 ? 
10 : label_indice.size() - 1; for (int i = 0; i < label_indice.size(); ++i) { if (label_indice[i].size() > 1) { // num_items_ += label_indice[i].size() * (label_indice[i].size() - 1) * num_neg_sampling; } } if (num_items_ == 0) { top[0]->mutable_cpu_data()[0] = 0; return; } diff_pos_.Reshape(num_items_, channels, 1, 1); diff_neg_.Reshape(num_items_, channels, 1, 1); rec_pos_.clear(); rec_neg_.clear(); int id_items = 0; // compute triplet loss for (int i = 0; i < label_indice.size(); ++i) { if (label_indice[i].size() > 1) { // for (int m = 0; m < label_indice[i].size(); ++m) { for (int n = 0; n < label_indice[i].size(); ++n) { if (m == n) continue; int idx_m = label_indice[i][m]; int idx_n = label_indice[i][n]; // compute diffs vector<bool> is_choosed(num, false); while (1) { int idx = rand() % num; if (!is_choosed[idx] && labels[idx] != labels[idx_m]) { // compute extra diff caffe_sub(channels, bottom[0]->cpu_data() + (idx_m * channels), bottom[0]->cpu_data() + (idx_n * channels), diff_pos_.mutable_cpu_data() + id_items * channels); // compute intra diff caffe_sub(channels, bottom[0]->cpu_data() + (idx_m * channels), bottom[0]->cpu_data() + (idx * channels), diff_neg_.mutable_cpu_data() + id_items * channels); rec_pos_.push_back(make_pair(idx_m, idx_n)); rec_neg_.push_back(make_pair(idx_m, idx)); is_choosed[idx] = true; ++id_items; } if (id_items % num_neg_sampling == 0) break; } } } } } num_items_ = id_items; Dtype loss(0.0); dist_sq_pos_.Reshape(num_items_, channels, 1, 1); dist_sq_neg_.Reshape(num_items_, channels, 1, 1); dist_sq_.Reshape(num_items_, 1, 1, 1); for (int i = 0; i < num_items_; ++i) { // Triplet loss accumulation // Loss component calculated from a and b dist_sq_pos_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos_.cpu_data() + (i*channels), diff_pos_.cpu_data() + (i*channels)); // a b is a similar pair for triplet dist_sq_.mutable_cpu_data()[i] = dist_sq_pos_.cpu_data()[i]; // Loss component calculated from a and c 
dist_sq_neg_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg_.cpu_data() + (i*channels), diff_neg_.cpu_data() + (i*channels)); // a c is a dissimilar pair for triplet dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg_.cpu_data()[i]; loss += ::max(margin_ + dist_sq_.cpu_data()[i], Dtype(0.0)); // loss accumulated accumulated by the triplet part } loss = loss / num_items_ / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void GraphLinkageLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // Dtype margin = this->layer_param_.contrastive_loss_param().margin(); //if (propagate_down[1]) { // LOG(FATAL) << this->type() // << " Layer cannot backpropagate to label inputs."; //} ///*=========================================*/ ///*propagate error gradient to bottom layers*/ ///*=========================================*/ //if (propagate_down[0]) { // // Dtype* bout = bottom[0]->mutable_cpu_diff(); // int num = bottom[0]->num(); // int channels = bottom[0]->channels(); // for (int i = 0; i < label_indice_.size(); ++i) { // intra error propagate // if (label_indice_[i].size() == 1) // continue; // for (int m = 0; m < label_indice_[i].size(); ++m) { // for (int n = 0; n < label_indice_[i].size(); ++n) { // int idx_m = label_indice_[i][m]; // int idx_n = label_indice_[i][n]; // // Dtype alpha = (-affinity_sample_.mutable_cpu_data()[idx_m * num + idx_n]) // // / (-sigma_) / label_indice_[i].size() / (label_indice_[i].size() - 1) / num_intra_valid_; // Dtype alpha = 1 / label_indice_[i].size() / (label_indice_[i].size() - 1) / num_intra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_m * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_n * channels)); // } // } // } // for (int i = 0; 
i < label_indice_.size(); ++i) { // for (int j = 0; j < label_indice_.size(); ++j) { // for (int m = 0; m < label_indice_[i].size(); ++m) { // for (int n = 0; n < label_indice_[j].size(); ++n) { // int idx_m = label_indice_[i][m]; // int idx_n = label_indice_[j][n]; // // Dtype alpha = (affinity_sample_.mutable_cpu_data()[idx_m * num + idx_n]) // // / (-sigma_) / label_indice_[i].size() / label_indice_[j].size() / label_indice_.size() / label_indice_.size(); // Dtype alpha = -1 / label_indice_[i].size() / label_indice_[j].size() / label_indice_.size() / label_indice_.size(); // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_m * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_n * channels)); // } // } // } // } //} /*=========================================*/ /* back propagate for contrastive loss */ /*=========================================*/ if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } /*=========================================*/ /*propagate error gradient to bottom layers*/ /*=========================================*/ //if (propagate_down[0]) { // // Dtype* bout = bottom[0]->mutable_cpu_diff(); // const Dtype* labels = bottom[1]->cpu_data(); // int num = bottom[0]->num(); // int channels = bottom[0]->channels(); // for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // if (i == j) // continue; // if (labels[i] == labels[j]) { // intra pairs // Dtype alpha = 0; // 2 * affinity_sample_.cpu_data()[i * num + j] / num_intra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (i * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (j * channels)); // } // else if (labels[i] 
!= labels[j]) { // extra pairs // if (affinity_sample_.cpu_data()[i * num + j] > 0.3) { // Dtype alpha = -2 * affinity_sample_.cpu_data()[i * num + j] / num_extra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (i * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (j * channels)); // } // // loss_extra += max(0, affinity_sample_.cpu_data()[i * num + j] - 0.5); // } // } // } //} /*====================================*/ /* back propagate for triplet loss */ /*====================================*/ if (propagate_down[0]) { caffe_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_cpu_diff()); const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(num_items_); int num = bottom[0]->num(); int channels = bottom[0]->channels(); Dtype* bout = bottom[0]->mutable_cpu_diff(); for (int i = 0; i < num_items_; ++i) { //caffe_cpu_axpby( // channels, // alpha, // diff_pos_.cpu_data() + (i * channels), // Dtype(1.0), // bout + (rec_pos_[i].first * channels)); //caffe_cpu_axpby( // channels, // -alpha, // diff_pos_.cpu_data() + (i * channels), // Dtype(1.0), // bout + (rec_pos_[i].second * channels)); if (margin_ + dist_sq_.cpu_data()[i] > Dtype(0.0)) { // similar pairs caffe_cpu_axpby( channels, alpha, diff_pos_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_pos_[i].first * channels)); caffe_cpu_axpby( channels, -alpha, diff_pos_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_pos_[i].second * channels)); // dissimilar pairs caffe_cpu_axpby( channels, -alpha, diff_neg_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_neg_[i].first * channels)); caffe_cpu_axpby( channels, alpha, diff_neg_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_neg_[i].second * channels)); } } } } INSTANTIATE_LAYER_GPU_FUNCS(GraphLinkageLossLayer); } // namespace caffe
33645b47b720f746a3df7074fa1eb68b7b9eff95.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> void GraphLinkageLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // get the nunmber of samples //int num = bottom[0]->num(); //int count = bottom[0]->count(); //const int channels = bottom[0]->channels(); //const Dtype* features = bottom[0]->cpu_data(); //const Dtype* labels = bottom[1]->cpu_data(); //// compute distance matrix //sigma_ = 0.0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // caffe_sub(channels, // bottom[0]->cpu_data() + (i * channels), // bottom[0]->cpu_data() + (j * channels), // diff_.mutable_cpu_data() + (i * num + j) * channels); // Dtype d_sq = caffe_cpu_dot(channels, // diff_.cpu_data() + (i * num + j) * channels, // diff_.cpu_data() + (i * num + j) * channels); // dist_sq_.mutable_cpu_data()[i * num + j] = d_sq; // sigma_ += d_sq; // } //} //sigma_ /= num * num; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // Dtype d_sq = dist_sq_.cpu_data()[i * num + j]; // // if (d_sq < sigma_ * sigma_) { // if (1) { // affinity_sample_.mutable_cpu_data()[i * num + j] = exp(-d_sq / sigma_); // } // else { // affinity_sample_.mutable_cpu_data()[i * num + j] = 0; // } // } //} //// build label map //std::map<int, std::vector<int> > label_indice_map; //for (int i = 0; i < num; ++i) { // label_indice_map[labels[i]].push_back(i); //} //// convert label map to //vector<int> labels_idx; //vector<vector<int> > label_indice; //for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { // labels_idx.push_back(it->first); // label_indice.push_back(it->second); //} //label_indice_ = label_indice; ////// compute the loss = loss_intra + loss_extra //affinity_intra_.Reshape(label_indice.size(), 1, 1, 1); 
///// compute intra loss loss_intra //for (int i = 0; i < label_indice.size(); ++i) { // Dtype val(0.0); // //for (int m = 0; m < label_indice[i].size(); ++m) { // // for (int n = 0; n < label_indice[i].size(); ++n) { // // Dtype entry_m_n = affinity_sample_.cpu_data()[label_indice[i][m] * num + // // label_indice[i][n]]; // // Dtype entry_n_m = affinity_sample_.cpu_data()[label_indice[i][n] * num + // // label_indice[i][m]]; // // val += entry_m_n * entry_n_m; // // } // //} // for (int m = 0; m < label_indice[i].size(); ++m) { // for (int n = 0; n < label_indice[i].size(); ++n) { // if (m == n) // continue; // Dtype entry_m_n = affinity_sample_.cpu_data()[label_indice[i][m] * num + // label_indice[i][n]]; // val += entry_m_n; // } // } // affinity_intra_.mutable_cpu_data()[i] = val; // // loss_intra += 1 - val / label_indice[i].size() / label_indice[i].size(); //} ///// compute extra loss loss_extra //affinity_extra_.Reshape(label_indice.size() * label_indice.size(), 1, 1, 1); //for (int i = 0; i < label_indice.size(); ++i) { // for (int j = 0; j < label_indice.size(); ++j) { // if (i == j) { // affinity_extra_.mutable_cpu_data()[j * label_indice.size() + i] = 0; // continue; // } // Dtype A_c_i_j = 0; // //for (int m = 0; m < label_indice[i].size(); ++m) { // // Dtype s_W_c_j_i = 0; // // for (int n = 0; n < label_indice[j].size(); ++n) { // // s_W_c_j_i += affinity_sample_.cpu_data()[label_indice[j][n] * num + // // label_indice[i][m]]; // // // W_samples.at<float>(label_indice[j][n], label_indice[i][m]); // // } // // Dtype s_W_c_i_j = 0; // // for (int n = 0; n < label_indice[j].size(); ++n) { // // s_W_c_i_j += affinity_sample_.cpu_data()[label_indice[i][m] * num + // // label_indice[j][n]]; // // // W_samples.at<float>(label_indice[i][m], label_indice[j][n]); // // } // // A_c_i_j += s_W_c_j_i * s_W_c_i_j; // //} // for (int m = 0; m < label_indice[i].size(); ++m) { // Dtype s_W_c_j_i = 0; // for (int n = 0; n < label_indice[j].size(); ++n) { // s_W_c_j_i 
+= affinity_sample_.cpu_data()[label_indice[j][n] * num + // label_indice[i][m]]; // // W_samples.at<float>(label_indice[j][n], label_indice[i][m]); // } // A_c_i_j += s_W_c_j_i; // } // affinity_extra_.mutable_cpu_data()[j * label_indice.size() + i] = A_c_i_j; // } //} //Dtype loss_intra(0.0), loss_extra(0.0); //Dtype loss(0.0); //int num_intra_valid = 0; //for (int i = 0; i < label_indice.size(); ++i) { // if (label_indice[i].size() == 1) // continue; // loss_intra += 1 - affinity_intra_.cpu_data()[i] / label_indice[i].size() / (label_indice[i].size() - 1); // ++num_intra_valid; //} //num_intra_valid_ = num_intra_valid; //for (int i = 0; i < label_indice.size(); ++i) { // for (int j = 0; j < label_indice.size(); ++j) { // loss_extra += affinity_extra_.cpu_data()[i * label_indice.size() + j] // / label_indice[i].size() / label_indice[j].size(); // A_c_i_j // } //} //loss = loss_intra / num_intra_valid + loss_extra / label_indice.size() / label_indice.size(); //top[0]->mutable_cpu_data()[0] = loss; /*===============================================================*/ /* contrastive loss */ /*===============================================================*/ //// get the nunmber of samples //int num = bottom[0]->num(); //int count = bottom[0]->count(); //const int channels = bottom[0]->channels(); //const Dtype* features = bottom[0]->cpu_data(); //const Dtype* labels = bottom[1]->cpu_data(); //// compute distance matrix //sigma_ = 0.0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // //Dtype norm_i = caffe_cpu_dot(channels, // // bottom[0]->cpu_data() + (i * channels), // // bottom[0]->cpu_data() + (i * channels)); // //Dtype norm_j = caffe_cpu_dot(channels, // // bottom[0]->cpu_data() + (j * channels), // // bottom[0]->cpu_data() + (j * channels)); // //caffe_scal(channels, Dtype(1.0) / sqrt(norm_i), bottom[0]->mutable_cpu_data() + (i * channels)); // //caffe_scal(channels, Dtype(1.0) / sqrt(norm_j), bottom[0]->mutable_cpu_data() + (j * 
channels)); // caffe_sub(channels, // bottom[0]->cpu_data() + (i * channels), // bottom[0]->cpu_data() + (j * channels), // diff_.mutable_cpu_data() + (i * num + j) * channels); // Dtype d_sq = caffe_cpu_dot(channels, // diff_.cpu_data() + (i * num + j) * channels, // diff_.cpu_data() + (i * num + j) * channels); // dist_sq_.mutable_cpu_data()[i * num + j] = d_sq; // sigma_ += d_sq; // } //} //sigma_ = sigma_ / (num * num); //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // Dtype d_sq = dist_sq_.cpu_data()[i * num + j]; // // if (d_sq < sigma_ * sigma_) { // if (1) { // affinity_sample_.mutable_cpu_data()[i * num + j] = exp(-d_sq / sigma_); // } // else { // affinity_sample_.mutable_cpu_data()[i * num + j] = 0; // } // } //} //// build label map //std::map<int, std::vector<int> > label_indice_map; //for (int i = 0; i < num; ++i) { // label_indice_map[labels[i]].push_back(i); //} //// convert label map to //vector<int> labels_idx; //vector<vector<int> > label_indice; //for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { // labels_idx.push_back(it->first); // label_indice.push_back(it->second); //} //label_indice_ = label_indice; ////// compute the loss = loss_intra + loss_extra //Dtype loss_intra(0.0), loss_extra(0.0); //Dtype loss(0.0); //int num_intra_valid = 0; //int num_extra_valid = 0; //for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // if (i == j) // continue; // if (labels[i] == labels[j]) { // intra pairs // ++num_intra_valid; // loss_intra += (1 - affinity_sample_.cpu_data()[i * num + j]); // } // else if (labels[i] != labels[j]) { // extra pairs // ++num_extra_valid; // loss_extra += affinity_sample_.cpu_data()[i * num + j] > 0.3 ? 
// affinity_sample_.cpu_data()[i * num + j] - 0.3: 0; // } // } //} //num_intra_valid_ = num_intra_valid; //num_extra_valid_ = num_extra_valid; //if (num_intra_valid == 0) { // loss = loss_extra / num_extra_valid; //} //else { // loss = loss_intra / num_intra_valid + loss_extra / num_extra_valid; //} //top[0]->mutable_cpu_data()[0] = loss; /*======================================================================*/ /* triplet loss */ /*======================================================================*/ // get the nunmber of samples int num = bottom[0]->num(); int count = bottom[0]->count(); const int channels = bottom[0]->channels(); const Dtype* features = bottom[0]->cpu_data(); const Dtype* labels = bottom[1]->cpu_data(); // build label map std::map<int, std::vector<int> > label_indice_map; for (int i = 0; i < num; ++i) { label_indice_map[labels[i]].push_back(i); } // convert label map to vector<int> labels_idx; vector<vector<int> > label_indice; for (std::map<int, vector<int>>::iterator it = label_indice_map.begin(); it != label_indice_map.end(); ++it) { labels_idx.push_back(it->first); label_indice.push_back(it->second); } label_indice_ = label_indice; num_items_ = 0; // compute number of items int num_neg_sampling = label_indice.size() - 1 > 10 ? 
10 : label_indice.size() - 1; for (int i = 0; i < label_indice.size(); ++i) { if (label_indice[i].size() > 1) { // num_items_ += label_indice[i].size() * (label_indice[i].size() - 1) * num_neg_sampling; } } if (num_items_ == 0) { top[0]->mutable_cpu_data()[0] = 0; return; } diff_pos_.Reshape(num_items_, channels, 1, 1); diff_neg_.Reshape(num_items_, channels, 1, 1); rec_pos_.clear(); rec_neg_.clear(); int id_items = 0; // compute triplet loss for (int i = 0; i < label_indice.size(); ++i) { if (label_indice[i].size() > 1) { // for (int m = 0; m < label_indice[i].size(); ++m) { for (int n = 0; n < label_indice[i].size(); ++n) { if (m == n) continue; int idx_m = label_indice[i][m]; int idx_n = label_indice[i][n]; // compute diffs vector<bool> is_choosed(num, false); while (1) { int idx = rand() % num; if (!is_choosed[idx] && labels[idx] != labels[idx_m]) { // compute extra diff caffe_sub(channels, bottom[0]->cpu_data() + (idx_m * channels), bottom[0]->cpu_data() + (idx_n * channels), diff_pos_.mutable_cpu_data() + id_items * channels); // compute intra diff caffe_sub(channels, bottom[0]->cpu_data() + (idx_m * channels), bottom[0]->cpu_data() + (idx * channels), diff_neg_.mutable_cpu_data() + id_items * channels); rec_pos_.push_back(make_pair(idx_m, idx_n)); rec_neg_.push_back(make_pair(idx_m, idx)); is_choosed[idx] = true; ++id_items; } if (id_items % num_neg_sampling == 0) break; } } } } } num_items_ = id_items; Dtype loss(0.0); dist_sq_pos_.Reshape(num_items_, channels, 1, 1); dist_sq_neg_.Reshape(num_items_, channels, 1, 1); dist_sq_.Reshape(num_items_, 1, 1, 1); for (int i = 0; i < num_items_; ++i) { // Triplet loss accumulation // Loss component calculated from a and b dist_sq_pos_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos_.cpu_data() + (i*channels), diff_pos_.cpu_data() + (i*channels)); // a b is a similar pair for triplet dist_sq_.mutable_cpu_data()[i] = dist_sq_pos_.cpu_data()[i]; // Loss component calculated from a and c 
dist_sq_neg_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg_.cpu_data() + (i*channels), diff_neg_.cpu_data() + (i*channels)); // a c is a dissimilar pair for triplet dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg_.cpu_data()[i]; loss += std::max(margin_ + dist_sq_.cpu_data()[i], Dtype(0.0)); // loss accumulated accumulated by the triplet part } loss = loss / num_items_ / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> void GraphLinkageLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // Dtype margin = this->layer_param_.contrastive_loss_param().margin(); //if (propagate_down[1]) { // LOG(FATAL) << this->type() // << " Layer cannot backpropagate to label inputs."; //} ///*=========================================*/ ///*propagate error gradient to bottom layers*/ ///*=========================================*/ //if (propagate_down[0]) { // // Dtype* bout = bottom[0]->mutable_cpu_diff(); // int num = bottom[0]->num(); // int channels = bottom[0]->channels(); // for (int i = 0; i < label_indice_.size(); ++i) { // intra error propagate // if (label_indice_[i].size() == 1) // continue; // for (int m = 0; m < label_indice_[i].size(); ++m) { // for (int n = 0; n < label_indice_[i].size(); ++n) { // int idx_m = label_indice_[i][m]; // int idx_n = label_indice_[i][n]; // // Dtype alpha = (-affinity_sample_.mutable_cpu_data()[idx_m * num + idx_n]) // // / (-sigma_) / label_indice_[i].size() / (label_indice_[i].size() - 1) / num_intra_valid_; // Dtype alpha = 1 / label_indice_[i].size() / (label_indice_[i].size() - 1) / num_intra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_m * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_n * channels)); // } // } // } // for (int i = 
0; i < label_indice_.size(); ++i) { // for (int j = 0; j < label_indice_.size(); ++j) { // for (int m = 0; m < label_indice_[i].size(); ++m) { // for (int n = 0; n < label_indice_[j].size(); ++n) { // int idx_m = label_indice_[i][m]; // int idx_n = label_indice_[j][n]; // // Dtype alpha = (affinity_sample_.mutable_cpu_data()[idx_m * num + idx_n]) // // / (-sigma_) / label_indice_[i].size() / label_indice_[j].size() / label_indice_.size() / label_indice_.size(); // Dtype alpha = -1 / label_indice_[i].size() / label_indice_[j].size() / label_indice_.size() / label_indice_.size(); // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_m * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (idx_m * num + idx_n) * channels, // Dtype(1.0), // bout + (idx_n * channels)); // } // } // } // } //} /*=========================================*/ /* back propagate for contrastive loss */ /*=========================================*/ if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } /*=========================================*/ /*propagate error gradient to bottom layers*/ /*=========================================*/ //if (propagate_down[0]) { // // Dtype* bout = bottom[0]->mutable_cpu_diff(); // const Dtype* labels = bottom[1]->cpu_data(); // int num = bottom[0]->num(); // int channels = bottom[0]->channels(); // for (int i = 0; i < num; ++i) { // for (int j = 0; j < num; ++j) { // if (i == j) // continue; // if (labels[i] == labels[j]) { // intra pairs // Dtype alpha = 0; // 2 * affinity_sample_.cpu_data()[i * num + j] / num_intra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (i * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (j * channels)); // } // else if 
(labels[i] != labels[j]) { // extra pairs // if (affinity_sample_.cpu_data()[i * num + j] > 0.3) { // Dtype alpha = -2 * affinity_sample_.cpu_data()[i * num + j] / num_extra_valid_; // caffe_cpu_axpby( // channels, // alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (i * channels)); // caffe_cpu_axpby( // channels, // -alpha, // diff_.cpu_data() + (i * num + j) * channels, // Dtype(1.0), // bout + (j * channels)); // } // // loss_extra += max(0, affinity_sample_.cpu_data()[i * num + j] - 0.5); // } // } // } //} /*====================================*/ /* back propagate for triplet loss */ /*====================================*/ if (propagate_down[0]) { caffe_set(bottom[0]->count(), Dtype(0.0), bottom[0]->mutable_cpu_diff()); const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(num_items_); int num = bottom[0]->num(); int channels = bottom[0]->channels(); Dtype* bout = bottom[0]->mutable_cpu_diff(); for (int i = 0; i < num_items_; ++i) { //caffe_cpu_axpby( // channels, // alpha, // diff_pos_.cpu_data() + (i * channels), // Dtype(1.0), // bout + (rec_pos_[i].first * channels)); //caffe_cpu_axpby( // channels, // -alpha, // diff_pos_.cpu_data() + (i * channels), // Dtype(1.0), // bout + (rec_pos_[i].second * channels)); if (margin_ + dist_sq_.cpu_data()[i] > Dtype(0.0)) { // similar pairs caffe_cpu_axpby( channels, alpha, diff_pos_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_pos_[i].first * channels)); caffe_cpu_axpby( channels, -alpha, diff_pos_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_pos_[i].second * channels)); // dissimilar pairs caffe_cpu_axpby( channels, -alpha, diff_neg_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_neg_[i].first * channels)); caffe_cpu_axpby( channels, alpha, diff_neg_.cpu_data() + (i * channels), Dtype(1.0), bout + (rec_neg_[i].second * channels)); } } } } INSTANTIATE_LAYER_GPU_FUNCS(GraphLinkageLossLayer); } // namespace caffe
7e4152c9faee15677aa30193354ee843ed98c333.hip
// !!! This is a file automatically generated by hipify!!! #include<stdlib.h> #include<cuda_runtime.h> typedef struct memoryPointer MemoryPointer; struct memoryPointer{ MemoryPointer *ptr; unsigned size; unsigned *data; }; static MemoryPointer base; static MemoryPointer *freep = NULL; int CHUNK_SIZE=256; int headerSize=16; int MIN_BULK_AMOUNT = 1000; //2^20 ~1million void gemtcAddList(MemoryPointer *bp){ MemoryPointer *p; for(p = freep; !(bp->data > p->data && bp->data < (p->ptr)->data); p = p->ptr) if(p->data >= (p->ptr)->data && (bp->data > p->data || bp->data < (p->ptr)->data)) break; if( (((char *)bp->data) + bp->size) == (char *)p->ptr->data){ bp->size += (p->ptr)->size; bp->ptr = (p->ptr)->ptr; cudaSafeMemcpy(bp->data, &bp->size, sizeof(unsigned), hipMemcpyHostToDevice, stream_dataIn, "Merging freed memory in old block"); if(p->ptr != &base)free(p->ptr); }else bp->ptr = p->ptr; if( (((char *)p->data) + p->size) == (char *)bp->data){ p->size += bp->size; p->ptr = bp->ptr; cudaSafeMemcpy(p->data, &p->size, sizeof(unsigned), hipMemcpyHostToDevice, stream_dataIn, "Merging old memory into new block"); free(bp); }else p->ptr = bp; freep = p; } void gemtcFree(void *loc){ loc = ((void *)(((char *)loc)-headerSize)); MemoryPointer *v = (MemoryPointer *) malloc(sizeof(MemoryPointer)); cudaSafeMemcpy(&v->size, loc, sizeof(unsigned), hipMemcpyDeviceToHost, stream_dataOut, "Reading size of freed memory"); v->data = (unsigned *) loc; gemtcAddList(v); } static MemoryPointer *morecore(unsigned nu){ void *cp; MemoryPointer *up = (MemoryPointer *)malloc(sizeof(MemoryPointer)); if (nu < MIN_BULK_AMOUNT) nu = MIN_BULK_AMOUNT; hipMalloc(&cp, nu); up->data = (unsigned *)cp; up->size = nu; cudaSafeMemcpy(cp,&nu,sizeof(unsigned),hipMemcpyHostToDevice, stream_dataIn, "Writing size of new block from hipMalloc"); gemtcAddList(up); return freep; } void *gemtcMalloc(unsigned nbytes){ MemoryPointer *p, *prevp; if ((prevp = freep)==NULL){ base.ptr = freep = prevp = &base; base.size = 0; } 
nbytes+=headerSize; if(nbytes%CHUNK_SIZE!=0)nbytes+=(CHUNK_SIZE-nbytes%CHUNK_SIZE); char *loc; for(p = prevp->ptr; ;prevp = p, p = p->ptr){ if(p->size >= nbytes){ if(p->size == nbytes){ prevp->ptr = p->ptr; loc = (char *) p->data; free(p); }else{ p->size -= nbytes; loc =((char *) p->data)+p->size; } freep = prevp; cudaSafeMemcpy(loc,&nbytes,sizeof(unsigned),hipMemcpyHostToDevice, stream_dataIn, "Writing size on newly allocated memory"); return (void *)(loc+headerSize); } if (p == freep) if((p = morecore(nbytes))==NULL) return NULL; } }
7e4152c9faee15677aa30193354ee843ed98c333.cu
#include<stdlib.h> #include<cuda_runtime.h> typedef struct memoryPointer MemoryPointer; struct memoryPointer{ MemoryPointer *ptr; unsigned size; unsigned *data; }; static MemoryPointer base; static MemoryPointer *freep = NULL; int CHUNK_SIZE=256; int headerSize=16; int MIN_BULK_AMOUNT = 1000; //2^20 ~1million void gemtcAddList(MemoryPointer *bp){ MemoryPointer *p; for(p = freep; !(bp->data > p->data && bp->data < (p->ptr)->data); p = p->ptr) if(p->data >= (p->ptr)->data && (bp->data > p->data || bp->data < (p->ptr)->data)) break; if( (((char *)bp->data) + bp->size) == (char *)p->ptr->data){ bp->size += (p->ptr)->size; bp->ptr = (p->ptr)->ptr; cudaSafeMemcpy(bp->data, &bp->size, sizeof(unsigned), cudaMemcpyHostToDevice, stream_dataIn, "Merging freed memory in old block"); if(p->ptr != &base)free(p->ptr); }else bp->ptr = p->ptr; if( (((char *)p->data) + p->size) == (char *)bp->data){ p->size += bp->size; p->ptr = bp->ptr; cudaSafeMemcpy(p->data, &p->size, sizeof(unsigned), cudaMemcpyHostToDevice, stream_dataIn, "Merging old memory into new block"); free(bp); }else p->ptr = bp; freep = p; } void gemtcFree(void *loc){ loc = ((void *)(((char *)loc)-headerSize)); MemoryPointer *v = (MemoryPointer *) malloc(sizeof(MemoryPointer)); cudaSafeMemcpy(&v->size, loc, sizeof(unsigned), cudaMemcpyDeviceToHost, stream_dataOut, "Reading size of freed memory"); v->data = (unsigned *) loc; gemtcAddList(v); } static MemoryPointer *morecore(unsigned nu){ void *cp; MemoryPointer *up = (MemoryPointer *)malloc(sizeof(MemoryPointer)); if (nu < MIN_BULK_AMOUNT) nu = MIN_BULK_AMOUNT; cudaMalloc(&cp, nu); up->data = (unsigned *)cp; up->size = nu; cudaSafeMemcpy(cp,&nu,sizeof(unsigned),cudaMemcpyHostToDevice, stream_dataIn, "Writing size of new block from cudaMalloc"); gemtcAddList(up); return freep; } void *gemtcMalloc(unsigned nbytes){ MemoryPointer *p, *prevp; if ((prevp = freep)==NULL){ base.ptr = freep = prevp = &base; base.size = 0; } nbytes+=headerSize; 
if(nbytes%CHUNK_SIZE!=0)nbytes+=(CHUNK_SIZE-nbytes%CHUNK_SIZE); char *loc; for(p = prevp->ptr; ;prevp = p, p = p->ptr){ if(p->size >= nbytes){ if(p->size == nbytes){ prevp->ptr = p->ptr; loc = (char *) p->data; free(p); }else{ p->size -= nbytes; loc =((char *) p->data)+p->size; } freep = prevp; cudaSafeMemcpy(loc,&nbytes,sizeof(unsigned),cudaMemcpyHostToDevice, stream_dataIn, "Writing size on newly allocated memory"); return (void *)(loc+headerSize); } if (p == freep) if((p = morecore(nbytes))==NULL) return NULL; } }
9aac74c29d8321e9ad3090d4387d08dde9525cb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Created by Zhiliang Zhou on 2020 // this is a simplified version of spconv // // original spconv was implemented by Yan Yan, https://github.com/traveller59/spconv // ------------------------------------------------------------------- // Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "spconv_cpp/reordering.cuh" #include "tensorview/tensor.h" #include "tensorview/torch_utils.h" #include "tensorview/cuda_utils.h" #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; template <typename T> using half_vec_t = std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; template <typename T> using half_vec_sadd_t = std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { if (size <= 0){ return; } int numPlanes = features.size(1); auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); // go if dtype == float_types_t tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_t<T>; 
tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( gatherGenericKernel<T, Index, NumTLP, NumILP>) , dim3(dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP))),dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); TV_CHECK_CUDA_ERR(); } }); }); } void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { if (size <= 0){ return; } int numPlanes = outFeatures.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); 
tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_sadd_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); // important for half. tv::mp_for_each<kernel_block_t>( [=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( scatterAddGenericKernel<T, Index, NumTLP, NumILP>) , dim3(dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP))),dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == 
volume * num_points if (size <= 0){ return; } int numPlanes = features.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel"); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( batchGatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel"); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( batchGatherGenericKernel<T, Index, NumTLP, NumILP>) , dim3(dim3(tv::cuda::DivUp(size, NumTLP), 
tv::cuda::DivUp(numPlanes, NumTLP))),dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_sadd_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = 1; // important for half. 
tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { hipLaunchKernelGGL(( batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(size / NumTLP, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP>) , dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; hipLaunchKernelGGL(( batchScatterAddGenericKernel<T, Index, NumTLP, NumILP>) , dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)), 0, stream, outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } } // namespace spconv
9aac74c29d8321e9ad3090d4387d08dde9525cb9.cu
// Created by Zhiliang Zhou on 2020 // this is a simplified version of spconv // // original spconv was implemented by Yan Yan, https://github.com/traveller59/spconv // ------------------------------------------------------------------- // Copyright 2019 Yan Yan // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "spconv_cpp/reordering.cuh" #include "tensorview/tensor.h" #include "tensorview/torch_utils.h" #include "tensorview/cuda_utils.h" #include <c10/cuda/CUDAStream.h> namespace spconv { using float_types_t = tv::mp_list<float, double, at::Half>; using int_types_t = tv::mp_list<int32_t, int64_t>; template <typename T> using half_vec_t = std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; template <typename T> using half_vec_sadd_t = std::conditional_t<std::is_same<T, at::Half>::value, int4, int4>; using kernel_block_t = tv::mp_list_c<int, 64, 32, 16>; void sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { if (size <= 0){ return; } int numPlanes = features.size(1); auto stream = c10::cuda::getCurrentCUDAStream(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); // go if dtype == float_types_t tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = 
sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { gatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { gatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>( buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; gatherGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)),dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); TV_CHECK_CUDA_ERR(); } }); }); } void sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { if (size <= 0){ return; } int numPlanes = outFeatures.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_sadd_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int 
vecloadFactor = sizeof(vecload_type_t) / sizeof(T); // important for half. tv::mp_for_each<kernel_block_t>( [=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { scatterAddVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { scatterAddGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>() + nHotBlock, size - nHotBlock, numPlanes); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; scatterAddGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)),dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_gather_cuda(torch::Tensor buffer, torch::Tensor features, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * num_points if (size <= 0){ return; } int numPlanes = features.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = features.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = 
decltype(TValue); using vecload_type_t = half_vec_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(T); tv::mp_for_each<kernel_block_t>( [=, &buffer, &features, &indices, &notFound](auto NumTLP) { constexpr int NumILP = NumTLP / 4; // constexpr int NumILP = NumTLP / (64 / (NumTLP / vecloadFactor)); int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { batchGatherVecBlockKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecBlockKernel"); } if (size - nHotBlock > 0) { batchGatherVecKernel<T, Index, int(NumTLP), NumILP, vecload_type_t> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>( buffer.data_ptr<T>() + nHotBlock * numPlanes, features.data_ptr<T>(), indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR_V2("batchGatherVecKernel"); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; batchGatherGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)),dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( buffer.data_ptr<T>(), features.data_ptr<T>(), indices.data_ptr<Index>(), size, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } }); }); } void batch_sparse_scatter_add_cuda(torch::Tensor buffer, torch::Tensor outFeatures, torch::Tensor indices, int size) { // indices: [volume, inds_stride] // buffer: [volume, num_points, num_features] // size == volume * 
num_points if (size <= 0) return; int numPlanes = outFeatures.size(1); auto stream = at::cuda::getCurrentCUDAStream(); auto dtype = outFeatures.scalar_type(); auto inds_dtype = indices.scalar_type(); int inds_stride = indices.size(1); int feature_stride = buffer.size(1); tv::DispatchTorch<float_types_t>()(dtype, [&](auto TValue) { using T = decltype(TValue); using vecload_type_t = half_vec_sadd_t<T>; tv::DispatchTorch<int_types_t>()(inds_dtype, [&](auto IndexValue) { using Index = decltype(IndexValue); bool notFound = true; constexpr int vecloadFactor = 1; // important for half. tv::mp_for_each<kernel_block_t>([=, &outFeatures, &buffer, &indices, &notFound](auto NumTLP) { // constexpr int NumILP = NumTLP / (64 / (NumTLP / // vecloadFactor)); constexpr int NumILP = NumTLP / 4; int nHotBlock = (size / NumTLP) * NumTLP; if (notFound) { if (numPlanes % NumTLP == 0) { if (nHotBlock >= NumTLP) { batchScatterAddBlockKernel<T, Index, int(NumTLP), NumILP> <<<dim3(size / NumTLP, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), nHotBlock, numPlanes / vecloadFactor, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } if (size - nHotBlock > 0) { batchScatterAddGenericKernel<T, Index, int(NumTLP), NumILP> <<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>(outFeatures.data_ptr<T>(), buffer.data_ptr<T>() + nHotBlock * numPlanes, indices.data_ptr<Index>(), size - nHotBlock, nHotBlock, numPlanes, inds_stride, feature_stride); TV_CHECK_CUDA_ERR(); } notFound = false; } } }); if (notFound) { constexpr int NumTLP = 64; constexpr int NumILP = NumTLP / 4; batchScatterAddGenericKernel<T, Index, NumTLP, NumILP> <<<dim3(tv::cuda::DivUp(size, NumTLP), tv::cuda::DivUp(numPlanes, NumTLP)), dim3(NumTLP / NumILP, NumTLP), 0, stream>>>( outFeatures.data_ptr<T>(), buffer.data_ptr<T>(), indices.data_ptr<Index>(), size, 0, numPlanes, inds_stride, feature_stride); 
TV_CHECK_CUDA_ERR(); } }); }); } } // namespace spconv
6c8b284523571514ea00ee373653104491d2587d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top; int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top; int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top; int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top; int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_top*(y)+xdim0_update_halo_kernel2_zvel_plus_2_top*ydim0_update_halo_kernel2_zvel_plus_2_top*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_top*(y)+xdim1_update_halo_kernel2_zvel_plus_2_top*ydim1_update_halo_kernel2_zvel_plus_2_top*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_top(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,-2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,-2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_top( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_top(arg0, arg1, 
arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(80,"update_halo_kernel2_zvel_plus_2_top"); OPS_kernels[80].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0; hipMemcpyToSymbol( 
xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * 
args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[80].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_top), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[80].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[80].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, range, &arg1); }
6c8b284523571514ea00ee373653104491d2587d.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top; int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top; int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top; int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top; int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_top*(y)+xdim0_update_halo_kernel2_zvel_plus_2_top*ydim0_update_halo_kernel2_zvel_plus_2_top*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_top*(y)+xdim1_update_halo_kernel2_zvel_plus_2_top*ydim1_update_halo_kernel2_zvel_plus_2_top*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_top(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,-2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,-2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_top( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_top(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char 
const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(80,"update_halo_kernel2_zvel_plus_2_top"); OPS_kernels[80].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int) ); 
xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * 
args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[80].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_2_top<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[80].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[80].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, range, &arg1); }
6629806d2263ee0c11c991ad91c09ba810d37fae.hip
// !!! This is a file automatically generated by hipify!!! // Andre Driedger 1805536 // A2 cuda greyscale source code #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <assert.h> #include <stdint.h> #include <tiffio.h> __global__ void greyscale(float *d_out, float* r, float* g, float* b){ int tid = threadIdx.x; int bid = blockIdx.x; int id = tid*(bid+1); int I = 0.299f * r[id] + 0.587f * g[id] +0.114f * b[id]; //re-pack the data unsigned int pack = I << 24 | I << 16 | I << 8 | 255; d_out[id] = (float)pack; } int main(int argc, char **argv){ TIFF* tif = TIFFOpen(argv[1], "r"); uint32_t w, h; uint16_t bits_per_sample, photometric, planar_config, samples_per_pixel; size_t npixels; uint32_t *raster, *raster_out; TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &h); // assert(TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0); assert(bits_per_sample == 8); assert(TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photometric)); assert(photometric == PHOTOMETRIC_RGB); assert(TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar_config) != 0); assert(TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel)); assert(samples_per_pixel == 3); // npixels = w * h; raster = (uint32_t*) _TIFFmalloc(npixels * sizeof(uint32_t)); TIFFReadRGBAImage(tif, w, h, raster, 0); int rArr[npixels], gArr[npixels], bArr[npixels]; for(int i = 0; i<npixels; i++){ // printf("^^%u^^ ", raster[i]); // printf("%u ", TIFFGetR(raster[i])); rArr[i] = (int)TIFFGetR(raster[i]); // printf("%u ", TIFFGetG(raster[i])); gArr[i] = (int)TIFFGetG(raster[i]); // printf("%u\n", TIFFGetB(raster[i])); bArr[i] = (int)TIFFGetB(raster[i]); } float *d_out, *r, *g, *b; hipMalloc((void**) &d_out, npixels * sizeof(float)); hipMalloc((void**) &r, npixels * sizeof(float)); hipMemcpy(r, rArr, npixels * sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**) &g, npixels * sizeof(float)); hipMemcpy(g, gArr, npixels * sizeof(float), hipMemcpyHostToDevice); 
hipMalloc((void**) &b, npixels * sizeof(float)); hipMemcpy(b, bArr, npixels * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( greyscale), dim3(npixels/1024), dim3(1024), 0, 0, d_out, r, g, b); char* odata = (char*) malloc(npixels*sizeof(char)); hipMemcpy(odata, d_out, npixels * sizeof(char), hipMemcpyDeviceToHost); for(int i=0; i<npixels; i++){ printf("%d\n", odata[i]); } TIFF *tif_out = TIFFOpen(argv[2], "w"); assert(tif_out); assert(TIFFSetField(tif_out, TIFFTAG_IMAGEWIDTH, w)); assert(TIFFSetField(tif_out, TIFFTAG_IMAGELENGTH, h)); // assert(TIFFSetField(tif_out, TIFFTAG_BITSPERSAMPLE, bits_per_sample)); assert(TIFFSetField(tif_out, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE)); assert(TIFFSetField(tif_out, TIFFTAG_PHOTOMETRIC, photometric)); assert(TIFFSetField(tif_out, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel)); assert(TIFFSetField(tif_out, TIFFTAG_PLANARCONFIG, planar_config)); assert(TIFFSetField(tif_out, TIFFTAG_ROWSPERSTRIP, h)); // size_t on = npixels * sizeof(uint32_t); assert(TIFFWriteRawStrip(tif_out, 0, raster_out, on) == on); TIFFClose(tif_out); // free(idata); // free(odata); return 0; }
6629806d2263ee0c11c991ad91c09ba810d37fae.cu
// Andre Driedger 1805536 // A2 cuda greyscale source code #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <assert.h> #include <stdint.h> #include <tiffio.h> __global__ void greyscale(float *d_out, float* r, float* g, float* b){ int tid = threadIdx.x; int bid = blockIdx.x; int id = tid*(bid+1); int I = 0.299f * r[id] + 0.587f * g[id] +0.114f * b[id]; //re-pack the data unsigned int pack = I << 24 | I << 16 | I << 8 | 255; d_out[id] = (float)pack; } int main(int argc, char **argv){ TIFF* tif = TIFFOpen(argv[1], "r"); uint32_t w, h; uint16_t bits_per_sample, photometric, planar_config, samples_per_pixel; size_t npixels; uint32_t *raster, *raster_out; TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &h); // assert(TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0); assert(bits_per_sample == 8); assert(TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photometric)); assert(photometric == PHOTOMETRIC_RGB); assert(TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar_config) != 0); assert(TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel)); assert(samples_per_pixel == 3); // npixels = w * h; raster = (uint32_t*) _TIFFmalloc(npixels * sizeof(uint32_t)); TIFFReadRGBAImage(tif, w, h, raster, 0); int rArr[npixels], gArr[npixels], bArr[npixels]; for(int i = 0; i<npixels; i++){ // printf("^^%u^^ ", raster[i]); // printf("%u ", TIFFGetR(raster[i])); rArr[i] = (int)TIFFGetR(raster[i]); // printf("%u ", TIFFGetG(raster[i])); gArr[i] = (int)TIFFGetG(raster[i]); // printf("%u\n", TIFFGetB(raster[i])); bArr[i] = (int)TIFFGetB(raster[i]); } float *d_out, *r, *g, *b; cudaMalloc((void**) &d_out, npixels * sizeof(float)); cudaMalloc((void**) &r, npixels * sizeof(float)); cudaMemcpy(r, rArr, npixels * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &g, npixels * sizeof(float)); cudaMemcpy(g, gArr, npixels * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &b, npixels * sizeof(float)); cudaMemcpy(b, bArr, 
npixels * sizeof(float), cudaMemcpyHostToDevice); greyscale<<<npixels/1024, 1024>>>(d_out, r, g, b); char* odata = (char*) malloc(npixels*sizeof(char)); cudaMemcpy(odata, d_out, npixels * sizeof(char), cudaMemcpyDeviceToHost); for(int i=0; i<npixels; i++){ printf("%d\n", odata[i]); } TIFF *tif_out = TIFFOpen(argv[2], "w"); assert(tif_out); assert(TIFFSetField(tif_out, TIFFTAG_IMAGEWIDTH, w)); assert(TIFFSetField(tif_out, TIFFTAG_IMAGELENGTH, h)); // assert(TIFFSetField(tif_out, TIFFTAG_BITSPERSAMPLE, bits_per_sample)); assert(TIFFSetField(tif_out, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE)); assert(TIFFSetField(tif_out, TIFFTAG_PHOTOMETRIC, photometric)); assert(TIFFSetField(tif_out, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel)); assert(TIFFSetField(tif_out, TIFFTAG_PLANARCONFIG, planar_config)); assert(TIFFSetField(tif_out, TIFFTAG_ROWSPERSTRIP, h)); // size_t on = npixels * sizeof(uint32_t); assert(TIFFWriteRawStrip(tif_out, 0, raster_out, on) == on); TIFFClose(tif_out); // free(idata); // free(odata); return 0; }
ffb0f1768b7521cd6ea6e08e8b61191815ce5ad6.hip
// !!! This is a file automatically generated by hipify!!!
//
// Torus/Int polynomial arithmetic for TFHE, with HIP kernels for the
// batched ("_16", "_2", "_vector") variants. Torus32 coefficients live in
// TorusPolynomial::coefsT; IntPolynomial coefficients in IntPolynomial::coefs.
// The *_gpu / *_16* host wrappers launch kernels on coefsT pointers, so for
// those functions coefsT must already point to device memory — implied by
// the hipLaunchKernelGGL calls; TODO confirm against callers.
#include "hip/hip_runtime.h"
#include <cassert>
#include <cmath>
#include "tfhe_core.h"
#include "numeric_functions.h"
#include "polynomials.h"
#include <iostream>

using namespace std;
using namespace std; // NOTE(review): duplicate directive, harmless but redundant.

// TorusPolynomial = 0: zero all N coefficients.
EXPORT void torusPolynomialClear(TorusPolynomial *result) {
    const int N = result->N;
    // cout << "Original N: torusPolynomialClear: " << N << endl;
    for (int i = 0; i < N; ++i) result->coefsT[i] = 0;
}

// TorusPolynomial = random: fill with uniform Torus32 samples.
EXPORT void torusPolynomialUniform(TorusPolynomial *result) {
    const int N = result->N;
    Torus32 *x = result->coefsT;
    for (int i = 0; i < N; ++i)
        x[i] = uniformTorus32_distrib(generator);
}

// TorusPolynomial = TorusPolynomial (deep copy; aliasing forbidden).
EXPORT void torusPolynomialCopy(TorusPolynomial *result, const TorusPolynomial *sample) {
    assert(result != sample);
    const int N = result->N;
    const Torus32 *__restrict s = sample->coefsT;
    Torus32 *__restrict r = result->coefsT;
    for (int i = 0; i < N; ++i) { r[i] = s[i]; }
}

// TorusPolynomial + TorusPolynomial (result must alias neither operand).
EXPORT void torusPolynomialAdd(TorusPolynomial *result, const TorusPolynomial *poly1,
                               const TorusPolynomial *poly2) {
    const int N = poly1->N;
    assert(result != poly1); //if it fails here, please use addTo
    assert(result != poly2); //if it fails here, please use addTo
    Torus32 *__restrict r = result->coefsT;
    const Torus32 *__restrict a = poly1->coefsT;
    const Torus32 *__restrict b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] = a[i] + b[i];
}

// TorusPolynomial += TorusPolynomial
EXPORT void torusPolynomialAddTo(TorusPolynomial *result, const TorusPolynomial *poly2) {
    const int N = poly2->N;
    Torus32 *r = result->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] += b[i];
}

// (A commented-out CPU variant torusPolynomialAddTo_16 was removed here;
// it was superseded by the GPU versions below.)

// Element-wise destination[i] += source[i] for i < length.
// Launch: 1-D grid, bounds-guarded, one thread per element.
__global__ void vectorAddToSelf(int *destination, const int *source, int length) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id < length) {
        destination[id] += source[id];
    }
}

// GPU: result += poly2 over bitSize concatenated N-coefficient polynomials.
// NOTE(review): no hipGetLastError / sync after the launch; errors surface later.
EXPORT void torusPolynomialAddTo_gpu(TorusPolynomial *result, int bitSize, int N,
                                     const TorusPolynomial *poly2) {
    int *r = result->coefsT;
    const int *b = poly2->coefsT;
    int length = N * bitSize;
    int BLOCKSIZE = 1024;
    int gridSize = (int)ceil((float)(N*bitSize)/BLOCKSIZE); // ceil-div launch
    hipLaunchKernelGGL(( vectorAddToSelf), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, r, b, length);
}

// GPU: as above but for nOutputs * bitSize polynomials.
EXPORT void torusPolynomialAddTo_gpu_2(TorusPolynomial *result, int nOutputs, int bitSize,
                                       int N, const TorusPolynomial *poly2) {
    int *r = result->coefsT;
    const int *b = poly2->coefsT;
    int length = nOutputs * bitSize * N;
    int BLOCKSIZE = 1024;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    hipLaunchKernelGGL(( vectorAddToSelf), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, r, b, length);
}

// GPU: as above but for vLength * nOutputs * bitSize polynomials.
EXPORT void torusPolynomialAddTo_gpu_2_vector(TorusPolynomial *result, int vLength, int nOutputs,
                                              int bitSize, int N, const TorusPolynomial *poly2) {
    int *r = result->coefsT;
    const int *b = poly2->coefsT;
    int length = vLength * nOutputs * bitSize * N;
    int BLOCKSIZE = 1024;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    hipLaunchKernelGGL(( vectorAddToSelf), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, r, b, length);
}

// TorusPolynomial - TorusPolynomial (result must alias neither operand).
EXPORT void torusPolynomialSub(TorusPolynomial *result, const TorusPolynomial *poly1,
                               const TorusPolynomial *poly2) {
    const int N = poly1->N;
    assert(result != poly1); //if it fails here, please use subTo
    assert(result != poly2); //if it fails here, please use subTo
    Torus32 *__restrict r = result->coefsT;
    const Torus32 *a = poly1->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] = a[i] - b[i];
}

// TorusPolynomial -= TorusPolynomial
EXPORT void torusPolynomialSubTo(TorusPolynomial *result, const TorusPolynomial *poly2) {
    const int N = poly2->N;
    Torus32 *r = result->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] -= b[i];
}

// TorusPolynomial + p*TorusPolynomial
EXPORT void torusPolynomialAddMulZ(TorusPolynomial *result, const TorusPolynomial *poly1, int p,
                                   const TorusPolynomial *poly2) {
    const int N = poly1->N;
    Torus32 *r = result->coefsT;
    const Torus32 *a = poly1->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] = a[i] + p * b[i];
}

// TorusPolynomial += p*TorusPolynomial
EXPORT void torusPolynomialAddMulZTo(TorusPolynomial *result, const int p,
                                     const TorusPolynomial *poly2) {
    const int N = poly2->N;
    Torus32 *r = result->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] += p * b[i];
}

// TorusPolynomial - p*TorusPolynomial
EXPORT void torusPolynomialSubMulZ(TorusPolynomial *result, const TorusPolynomial *poly1,
                                   const int p, const TorusPolynomial *poly2) {
    const int N = poly1->N;
    Torus32 *r = result->coefsT;
    const Torus32 *a = poly1->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] = a[i] - p * b[i];
}

/**
 * result = (X^{a} - 1) * source, in Z[X]/(X^N + 1) (negacyclic rotation
 * minus identity, as used by blind rotation).
 * @param result : result (must not alias source for correct values)
 * @param a : barai, rotation amount in [0, 2N)
 * @param source : accum
 */
EXPORT void torusPolynomialMulByXaiMinusOne(TorusPolynomial *result, int a,
                                            const TorusPolynomial *source) {
    const int N = source->N;
    Torus32 *out = result->coefsT;
    Torus32 *in = source->coefsT;
    assert(a >= 0 && a < 2 * N);
    if (a < N) {
        for (int i = 0; i < a; i++)   // here i - a < 0: wrap with sign flip
            out[i] = -in[i - a + N] - in[i];
        for (int i = a; i < N; i++)   // here N > i - a >= 0
            out[i] = in[i - a] - in[i];
    } else {
        const int aa = a - N;
        for (int i = 0; i < aa; i++)  // here i - a < 0
            out[i] = in[i - aa + N] - in[i];
        for (int i = aa; i < N; i++)  // here N > i - a >= 0
            out[i] = -in[i - aa] - in[i];
    }
    // (commented-out input/output debug dumps removed)
}

// Batched kernel form of torusPolynomialMulByXaiMinusOne: bitSize
// polynomials of length N are concatenated; each bit uses its own rotation
// a = bara[bit*N + baraIndex]. One thread per coefficient, bounds-guarded.
__global__ void torusPolynomialMulByXaiMinusOne_16_GPU(int* destination, const int* bara,
                                                       int baraIndex, int bitSize, int N,
                                                       int* source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < bitSize * N) {
        int bitIndex = id / N;                    // which polynomial in the batch
        int startIndex = bitIndex * N;
        int a = bara[startIndex + baraIndex];     // per-bit rotation amount
        int threadIdModN = id % N;                // coefficient index within polynomial
        if (a < N) {
            if(threadIdModN < a) {
                destination[id] = -source[id - a + N] - source[id];
            } else {
                destination[id] = source[id - a] - source[id];
            }
        } else {
            const int aa = a - N;
            if(threadIdModN < aa) {
                destination[id] = source[id - aa + N] - source[id];
            } else {
                destination[id] = -source[id - aa] - source[id];
            }
        }
    }
}

// Same as above for nOutputs * bitSize concatenated polynomials.
__global__ void torusPolynomialMulByXaiMinusOne_16_GPU_2(int* destination, const int* bara,
                                                         int baraIndex, int nOutputs,
                                                         int bitSize, int N, int* source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int length = nOutputs * bitSize * N;
    if(id < length) {
        int bitIndex = id / N;
        int startIndex = bitIndex * N;
        int a = bara[startIndex + baraIndex];
        int threadIdModN = id % N;
        if (a < N) {
            if(threadIdModN < a) {
                destination[id] = -source[id - a + N] - source[id];
            } else {
                destination[id] = source[id - a] - source[id];
            }
        } else {
            int aa = a - N;
            if(threadIdModN < aa) {
                destination[id] = source[id - aa + N] - source[id];
            } else {
                destination[id] = -source[id - aa] - source[id];
            }
        }
    }
}

// "v2" layout: data additionally carries (k+1) components per polynomial;
// bitIndex wraps modulo (bitSize * nOutputs) so both components of a bit
// reuse the same bara entry.
__global__ void torusPolynomialMulByXaiMinusOne_16_GPU_2v2(int* destination, const int* bara,
                                                           int baraIndex, int nOutputs,
                                                           int bitSize, int N, int k,
                                                           int* source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int length = nOutputs * bitSize * N * (k + 1);
    if(id < length) {
        int bitIndex = (id / N) % (bitSize *nOutputs);
        int startIndex = bitIndex * N;
        int a = bara[startIndex + baraIndex];
        int threadIdModN = id % N;
        if (a < N) {
            if(threadIdModN < a) {
                destination[id] = -source[id - a + N] - source[id];
            } else {
                destination[id] = source[id - a] - source[id];
            }
        } else {
            int aa = a - N;
            if(threadIdModN < aa) {
                destination[id] = source[id - aa + N] - source[id];
            } else {
                destination[id] = -source[id - aa] - source[id];
            }
        }
    }
}

// Vector-batched variant over vLength * nOutputs * bitSize polynomials.
// NOTE(review): the __syncthreads() calls below sit inside data-dependent
// branches (a varies per polynomial, i.e. per thread), which is undefined
// behavior for divergent barriers; each thread only writes destination[id],
// so the barriers also appear unnecessary — candidate for removal after
// verification.
__global__ void torusPolynomialMulByXaiMinusOne_16_GPU_2_vector(int* destination, const int* bara,
                                                                int baraIndex, int nOutputs,
                                                                int vLength, int bitSize, int N,
                                                                int* source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int length = nOutputs * vLength * bitSize * N;
    if(id < length) {
        int bitIndex = (id / N) % (bitSize *nOutputs * vLength);
        int startIndex = bitIndex * N;
        int a = bara[startIndex + baraIndex];
        int threadIdModN = id % N;
        if (a < N) {
            if(threadIdModN < a) {
                destination[id] = -source[id - a + N] - source[id];
            }
            __syncthreads();
            if(threadIdModN >= a){
                destination[id] = source[id - a] - source[id];
            }
            __syncthreads();
        }
        __syncthreads();
        if ((a >= N)) {
            int aa = a - N;
            if(threadIdModN < aa) {
                destination[id] = source[id - aa + N] - source[id];
            }
            __syncthreads();
            if(threadIdModN >= aa) {
                destination[id] = -source[id - aa] - source[id];
            }
            __syncthreads();
        }
    }
}

// Host wrapper: batched (X^a - 1) multiply over bitSize polynomials.
// result->coefsT / source->coefsT are passed straight to the kernel, so
// they must be device pointers. No post-launch error check (see note above).
EXPORT void torusPolynomialMulByXaiMinusOne_16(TorusPolynomial *result, const int *bara,
                                               int baraIndex, int bitSize, int N,
                                               const TorusPolynomial *source) {
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int gridSize = (int)ceil((float)(N*bitSize)/BLOCKSIZE);
    hipLaunchKernelGGL(( torusPolynomialMulByXaiMinusOne_16_GPU), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, bara, baraIndex, bitSize, N, in);
    // (commented-out hipDeviceSynchronize and debug dumps removed)
}

// Host wrapper: two-output batched variant.
EXPORT void torusPolynomialMulByXaiMinusOne_16_2(TorusPolynomial *result, const int *bara,
                                                 int baraIndex, int nOutputs, int bitSize, int N,
                                                 const TorusPolynomial *source) {
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int length = nOutputs * bitSize * N;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    hipLaunchKernelGGL(( torusPolynomialMulByXaiMinusOne_16_GPU_2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, bara, baraIndex, nOutputs, bitSize, N, in);
    // (commented-out debug dumps removed)
}

// Host wrapper: v2 layout with k+1 components (k fixed to 1 here).
EXPORT void torusPolynomialMulByXaiMinusOne_16_2v2(TorusPolynomial *resultV2, const int *bara,
                                                   int baraIndex, int nOutputs, int bitSize, int N,
                                                   const TorusPolynomial *source) {
    int *out = resultV2->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int k = 1; // hard-coded number of mask components minus body
    int length = nOutputs * bitSize * N * (k + 1);
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    hipLaunchKernelGGL(( torusPolynomialMulByXaiMinusOne_16_GPU_2v2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, bara, baraIndex, nOutputs, bitSize, N, k, in);
}

// Host wrapper: vector-batched variant.
EXPORT void torusPolynomialMulByXaiMinusOne_16_2_vector(TorusPolynomial *result, const int *bara,
                                                        int baraIndex, int vLength, int nOutputs,
                                                        int bitSize, int N,
                                                        const TorusPolynomial *source) {
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int length = vLength * nOutputs * bitSize * N;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    hipLaunchKernelGGL(( torusPolynomialMulByXaiMinusOne_16_GPU_2_vector), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, bara, baraIndex, nOutputs, vLength, bitSize, N, in);
}

// result = X^{a} * source in Z[X]/(X^N + 1) (negacyclic rotation).
EXPORT void torusPolynomialMulByXai(TorusPolynomial *result, int a,
                                    const TorusPolynomial *source) {
    const int N = source->N;
    Torus32 *out = result->coefsT;
    Torus32 *in = source->coefsT;
    assert(a >= 0 && a < 2 * N);
    assert(result != source);
    if (a < N) {
        for (int i = 0; i < a; i++)   // here i - a < 0: wrap with sign flip
            out[i] = -in[i - a + N];
        for (int i = a; i < N; i++)   // here N > i - a >= 0
            out[i] = in[i - a];
    } else {
        const int aa = a - N;
        for (int i = 0; i < aa; i++)  // here i - a < 0
            out[i] = in[i - aa + N];
        for (int i = aa; i < N; i++)  // here N > i - a >= 0
            out[i] = -in[i - aa];
    }
    // (commented-out debug dump removed)
}

/**
 * Batched kernel: destination = X^{2N - barb[bit]} * source, per bit.
 * @param destination output coefficients (bitSize * N ints, device)
 * @param N polynomial length; _2N = 2*N
 * @param barb per-bit rotation parameters (device)
 * @param bitSize number of concatenated polynomials
 * @param source input coefficients (device)
 */
__global__ void torusPolynomialMulByXai_16_GPU(int *destination, int N, int _2N, const int *barb,
                                               int bitSize, int *source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int bIndex = id / N;      // which polynomial in the batch
    int baraIndex = id % N;   // coefficient index within polynomial
    if (id < (bitSize * N)) {
        int a = _2N - barb[bIndex];
        if (a < N) {
            if (baraIndex < a) {
                destination[id] = -source[id - a + N];
            } else {
                destination[id] = source[id - a];
            }
        } else {
            const int aa = a - N;
            if (baraIndex < aa) {
                destination[id] = source[id - aa + N];
            } else {
                destination[id] = -source[id - aa];
            }
        }
    }
}

// Same as above for nOutputs * bitSize polynomials.
__global__ void torusPolynomialMulByXai_16_GPU_2(int *destination, int N, int _2N, const int *barb,
                                                 int nOutputs, int bitSize, int *source) {
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int bIndex = id / N;
    int baraIndex = id % N;
    if (id < nOutputs * bitSize * N) {
        int a = _2N - barb[bIndex];
        if (a < N) {
            if (baraIndex < a) {
                destination[id] = -source[id - a + N];
            } else {
                destination[id] = source[id - a];
            }
        } else {
            const int aa = a - N;
            if (baraIndex < aa) {
                destination[id] = source[id - aa + N];
            } else {
                destination[id] = -source[id - aa];
            }
        }
    }
}

// Host wrapper: batched X^a multiply. barb is a HOST array here — it is
// copied to a temporary device buffer each call.
// NOTE(review): per-call hipMalloc/hipMemcpy/hipFree with no error checks;
// consider a persistent buffer if this is on a hot path.
EXPORT void torusPolynomialMulByXai_16(TorusPolynomial *result, int _2N, const int *barb,
                                       int bitSize, const TorusPolynomial *source) {
    const int N = source->N/bitSize; // source->N holds the total batched length
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int gridSize = (int)ceil((float)(N * bitSize)/BLOCKSIZE);
    int *barb_GPU;
    hipMalloc(&barb_GPU, bitSize * sizeof(int));
    hipMemcpy(barb_GPU, barb, bitSize * sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( torusPolynomialMulByXai_16_GPU), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, N, _2N, barb_GPU, bitSize, in);
    hipFree(barb_GPU);
    // (commented-out debug dump removed)
}

// Host wrapper: two-output batched X^a multiply.
EXPORT void torusPolynomialMulByXai_16_2(TorusPolynomial *result, int _2N, const int *barb,
                                         int nOutputs, int bitSize,
                                         const TorusPolynomial *source) {
    const int N = source->N/(bitSize * nOutputs); // 1024
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int length = nOutputs * bitSize * N;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);//32
    int *barb_GPU;
    hipMalloc(&barb_GPU, nOutputs * bitSize * sizeof(int));
    hipMemcpy(barb_GPU, barb, nOutputs * bitSize * sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( torusPolynomialMulByXai_16_GPU_2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, N, _2N, barb_GPU, nOutputs, bitSize, in);
    hipFree(barb_GPU);
    // (commented-out debug dump removed)
}

// Host wrapper: vector-batched X^a multiply. Reuses the _GPU_2 kernel with
// bitSize folded together with vLength.
EXPORT void torusPolynomialMulByXai_16_2_vector(TorusPolynomial *result, int _2N, const int *barb,
                                                int vLength, int nOutputs, int bitSize,
                                                const TorusPolynomial *source) {
    const int N = source->N/(bitSize * nOutputs * vLength); // 1024
    int *out = result->coefsT;
    int *in = source->coefsT;
    int BLOCKSIZE = 1024;
    int length = vLength * nOutputs * bitSize * N;
    int gridSize = (int)ceil((float)(length)/BLOCKSIZE);
    int *barb_GPU;
    hipMalloc(&barb_GPU, vLength * nOutputs * bitSize * sizeof(int));
    hipMemcpy(barb_GPU, barb, vLength * nOutputs * bitSize * sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( torusPolynomialMulByXai_16_GPU_2), dim3(gridSize), dim3(BLOCKSIZE), 0, 0, out, N, _2N, barb_GPU, nOutputs, bitSize * vLength, in);
    hipFree(barb_GPU);
}

// TorusPolynomial -= p*TorusPolynomial
EXPORT void torusPolynomialSubMulZTo(TorusPolynomial *result, int p,
                                     const TorusPolynomial *poly2) {
    const int N = poly2->N;
    Torus32 *r = result->coefsT;
    const Torus32 *b = poly2->coefsT;
    for (int i = 0; i < N; ++i)
        r[i] -= p * b[i];
}

// Squared Euclidean norm of an IntPolynomial.
// NOTE(review): accumulates in an int although the return type is double —
// large coefficients/N can overflow before the conversion; verify ranges.
EXPORT double intPolynomialNormSq2(const IntPolynomial *poly) {
    const int N = poly->N;
    int temp1 = 0;
    for (int i = 0; i < N; ++i) {
        int temp0 = poly->coefs[i] * poly->coefs[i];
        temp1 += temp0;
    }
    return temp1;
}

// Sets an IntPolynomial to zero.
EXPORT void intPolynomialClear(IntPolynomial *poly) {
    const int N = poly->N;
    for (int i = 0; i < N; ++i)
        poly->coefs[i] = 0;
}

// Copies source into result (sizes assumed equal).
EXPORT void intPolynomialCopy(IntPolynomial *result, const IntPolynomial *source) {
    const int N = source->N;
    for (int i = 0; i < N; ++i)
        result->coefs[i] = source->coefs[i];
}

/** accum += source */
EXPORT void intPolynomialAddTo(IntPolynomial *accum, const IntPolynomial *source) {
    const int N = source->N;
    for (int i = 0; i < N; ++i)
        accum->coefs[i] += source->coefs[i];
}

/** result = (X^ai - 1) * source, negacyclic (see torusPolynomialMulByXaiMinusOne). */
EXPORT void intPolynomialMulByXaiMinusOne(IntPolynomial *result, int ai,
                                          const IntPolynomial *source) {
    const int N = source->N;
    int *out = result->coefs;
    int *in = source->coefs;
    assert(ai >= 0 && ai < 2 * N);
    if (ai < N) {
        for (int i = 0; i < ai; i++)  // here i - a < 0: wrap with sign flip
            out[i] = -in[i - ai + N] - in[i];
        for (int i = ai; i < N; i++)  // here N > i - a >= 0
            out[i] = in[i - ai] - in[i];
    } else {
        const int aa = ai - N;
        for (int i = 0; i < aa; i++)  // here i - a < 0
            out[i] = in[i - aa + N] - in[i];
        for (int i = aa; i < N; i++)  // here N > i - a >= 0
            out[i] = -in[i - aa] - in[i];
    }
}

// Infinity norm of the distance between two TorusPolynomials
// (max over |poly1 - poly2| after torus-to-double conversion).
EXPORT double torusPolynomialNormInftyDist(const TorusPolynomial *poly1,
                                           const TorusPolynomial *poly2) {
    const int N = poly1->N;
    double norm = 0;
    // Max between the coefficients of abs(poly1-poly2)
    for (int i = 0; i < N; ++i) {
        double r = abs(t32tod(poly1->coefsT[i] - poly2->coefsT[i]));
        if (r > norm) { norm = r; }
    }
    return norm;
}

// Squared 2-norm of an IntPolynomial (double accumulation).
EXPORT double intPolynomialNorm2sq(const IntPolynomial *poly) {
    const int N = poly->N;
    double norm = 0;
    for (int i = 0; i < N; ++i) {
        double r = poly->coefs[i];
        norm += r * r;
    }
    return norm;
}

// Infinity norm of the distance between two IntPolynomials.
EXPORT double intPolynomialNormInftyDist(const IntPolynomial *poly1,
                                         const IntPolynomial *poly2) {
    const int N = poly1->N;
    double norm = 0;
    // Max between the coefficients of abs(poly1-poly2)
    for (int i = 0; i < N; ++i) {
        double r = abs(poly1->coefs[i] - poly2->coefs[i]);
        if (r > norm) { norm = r; }
    }
    return norm;
}
ffb0f1768b7521cd6ea6e08e8b61191815ce5ad6.cu
#include <cassert> #include <cmath> #include "tfhe_core.h" #include "numeric_functions.h" #include "polynomials.h" #include <iostream> using namespace std; using namespace std; // TorusPolynomial = 0 EXPORT void torusPolynomialClear(TorusPolynomial *result) { const int N = result->N; // cout << "Original N: torusPolynomialClear: " << N << endl; for (int i = 0; i < N; ++i) result->coefsT[i] = 0; } // TorusPolynomial = random EXPORT void torusPolynomialUniform(TorusPolynomial *result) { const int N = result->N; Torus32 *x = result->coefsT; for (int i = 0; i < N; ++i) x[i] = uniformTorus32_distrib(generator); } // TorusPolynomial = TorusPolynomial EXPORT void torusPolynomialCopy( TorusPolynomial *result, const TorusPolynomial *sample) { assert(result != sample); const int N = result->N; const Torus32 *__restrict s = sample->coefsT; Torus32 *__restrict r = result->coefsT; for (int i = 0; i < N; ++i) { r[i] = s[i]; } } // TorusPolynomial + TorusPolynomial EXPORT void torusPolynomialAdd(TorusPolynomial *result, const TorusPolynomial *poly1, const TorusPolynomial *poly2) { const int N = poly1->N; assert(result != poly1); //if it fails here, please use addTo assert(result != poly2); //if it fails here, please use addTo Torus32 *__restrict r = result->coefsT; const Torus32 *__restrict a = poly1->coefsT; const Torus32 *__restrict b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] = a[i] + b[i]; } // TorusPolynomial += TorusPolynomial EXPORT void torusPolynomialAddTo(TorusPolynomial *result, const TorusPolynomial *poly2) { const int N = poly2->N; Torus32 *r = result->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] += b[i]; } //EXPORT void torusPolynomialAddTo_16(TorusPolynomial *result, int bitSize, int N, const TorusPolynomial *poly2) { // const int N = poly2->N; // Torus32 *r = result->coefsT; // const Torus32 *b = poly2->coefsT; // //// for (int i = 0; i < N; ++i) //// r[i] += b[i]; // for (int i = startIndex; i < endIndex; ++i) // r[i] += b[i]; 
//} __global__ void vectorAddToSelf(int * destination, const int * source, int length) { int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < length) { destination[id] += source[id]; } } EXPORT void torusPolynomialAddTo_gpu(TorusPolynomial *result, int bitSize, int N, const TorusPolynomial *poly2) { // const int N = poly2->N; int *r = result->coefsT; const int *b = poly2->coefsT; int length = N * bitSize; int BLOCKSIZE = 1024; int gridSize = (int)ceil((float)(N*bitSize)/BLOCKSIZE); vectorAddToSelf<<<gridSize, BLOCKSIZE>>>(r, b, length); // for (int i = 0; i < N; ++i) // r[i] += b[i]; // for (int i = startIndex; i < endIndex; ++i) // r[i] += b[i]; } EXPORT void torusPolynomialAddTo_gpu_2(TorusPolynomial *result, int nOutputs, int bitSize, int N, const TorusPolynomial *poly2) { // const int N = poly2->N; int *r = result->coefsT; const int *b = poly2->coefsT; int length = nOutputs * bitSize * N; int BLOCKSIZE = 1024; int gridSize = (int)ceil((float)(length)/BLOCKSIZE); vectorAddToSelf<<<gridSize, BLOCKSIZE>>>(r, b, length); // for (int i = 0; i < N; ++i) // r[i] += b[i]; // for (int i = startIndex; i < endIndex; ++i) // r[i] += b[i]; } EXPORT void torusPolynomialAddTo_gpu_2_vector(TorusPolynomial *result, int vLength, int nOutputs, int bitSize, int N, const TorusPolynomial *poly2) { // const int N = poly2->N; int *r = result->coefsT; const int *b = poly2->coefsT; int length = vLength * nOutputs * bitSize * N; int BLOCKSIZE = 1024; int gridSize = (int)ceil((float)(length)/BLOCKSIZE); vectorAddToSelf<<<gridSize, BLOCKSIZE>>>(r, b, length); // for (int i = 0; i < N; ++i) // r[i] += b[i]; // for (int i = startIndex; i < endIndex; ++i) // r[i] += b[i]; } // TorusPolynomial - TorusPolynomial EXPORT void torusPolynomialSub(TorusPolynomial *result, const TorusPolynomial *poly1, const TorusPolynomial *poly2) { const int N = poly1->N; assert(result != poly1); //if it fails here, please use subTo assert(result != poly2); //if it fails here, please use subTo Torus32 *__restrict r 
= result->coefsT; const Torus32 *a = poly1->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] = a[i] - b[i]; } // TorusPolynomial -= TorusPolynomial EXPORT void torusPolynomialSubTo(TorusPolynomial *result, const TorusPolynomial *poly2) { const int N = poly2->N; Torus32 *r = result->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] -= b[i]; } // TorusPolynomial + p*TorusPolynomial EXPORT void torusPolynomialAddMulZ(TorusPolynomial *result, const TorusPolynomial *poly1, int p, const TorusPolynomial *poly2) { const int N = poly1->N; Torus32 *r = result->coefsT; const Torus32 *a = poly1->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] = a[i] + p * b[i]; } // TorusPolynomial += p*TorusPolynomial EXPORT void torusPolynomialAddMulZTo(TorusPolynomial *result, const int p, const TorusPolynomial *poly2) { const int N = poly2->N; Torus32 *r = result->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] += p * b[i]; } // TorusPolynomial - p*TorusPolynomial EXPORT void torusPolynomialSubMulZ(TorusPolynomial *result, const TorusPolynomial *poly1, const int p, const TorusPolynomial *poly2) { const int N = poly1->N; Torus32 *r = result->coefsT; const Torus32 *a = poly1->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] = a[i] - p * b[i]; } /** * * @param result : result * @param a : barai * @param source : accum */ //result= (X^{a}-1)*source EXPORT void torusPolynomialMulByXaiMinusOne(TorusPolynomial *result, int a, const TorusPolynomial *source) { const int N = source->N; Torus32 *out = result->coefsT; Torus32 *in = source->coefsT; assert(a >= 0 && a < 2 * N); if (a < N) { for (int i = 0; i < a; i++)//sur que i-a<0 out[i] = -in[i - a + N] - in[i]; for (int i = a; i < N; i++)//sur que N>i-a>=0 out[i] = in[i - a] - in[i]; } else { const int aa = a - N; for (int i = 0; i < aa; i++)//sur que i-a<0 out[i] = in[i - aa + N] - in[i]; for (int i = aa; i < N; 
i++)//sur que N>i-a>=0 out[i] = -in[i - aa] - in[i]; } //check intput // static int counter = 0; // int offset = 1000; // if (counter >= offset && counter < offset + 20) { // cout << "old input: "; // for (int i = 0; i < 10; ++i) { // cout << source->coefsT[i] << " "; // } // cout << endl; // } // counter++; //check output // static int counter = 0; // int offset = 1000; // if (counter >= offset && counter < offset + 20) { // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << result->coefsT[i] << " "; // } // cout << endl; // } // counter++; } //new __global__ void torusPolynomialMulByXaiMinusOne_16_GPU(int* destination, const int* bara, int baraIndex, int bitSize, int N, int* source) { int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < bitSize * N) { int bitIndex = id / N; int startIndex = bitIndex * N; int a = bara[startIndex + baraIndex]; int threadIdModN = id % N; if (a < N) { if(threadIdModN < a) { destination[id] = -source[id - a + N] - source[id]; } else { destination[id] = source[id - a] - source[id]; } } else { const int aa = a - N; if(threadIdModN < aa) { destination[id] = source[id - aa + N] - source[id]; } else { destination[id] = -source[id - aa] - source[id]; } } } } __global__ void torusPolynomialMulByXaiMinusOne_16_GPU_2(int* destination, const int* bara, int baraIndex, int nOutputs, int bitSize, int N, int* source) { int id = blockIdx.x*blockDim.x+threadIdx.x; int length = nOutputs * bitSize * N; if(id < length) { //bibel int bitIndex = id / N; int startIndex = bitIndex * N; int a = bara[startIndex + baraIndex]; int threadIdModN = id % N; if (a < N) { if(threadIdModN < a) { destination[id] = -source[id - a + N] - source[id]; } else { destination[id] = source[id - a] - source[id]; } } else { int aa = a - N; if(threadIdModN < aa) { destination[id] = source[id - aa + N] - source[id]; } else { destination[id] = -source[id - aa] - source[id]; } } // testing for v2 // destination[id] = bitIndex; } } __global__ void 
torusPolynomialMulByXaiMinusOne_16_GPU_2v2(int* destination, const int* bara, int baraIndex, int nOutputs, int bitSize, int N, int k, int* source) { int id = blockIdx.x*blockDim.x+threadIdx.x; int length = nOutputs * bitSize * N * (k + 1); if(id < length) { int bitIndex = (id / N) % (bitSize *nOutputs); int startIndex = bitIndex * N; int a = bara[startIndex + baraIndex]; int threadIdModN = id % N; if (a < N) { if(threadIdModN < a) { destination[id] = -source[id - a + N] - source[id]; } else { destination[id] = source[id - a] - source[id]; } } else { int aa = a - N; if(threadIdModN < aa) { destination[id] = source[id - aa + N] - source[id]; } else { destination[id] = -source[id - aa] - source[id]; } } // __syncthreads(); } } __global__ void torusPolynomialMulByXaiMinusOne_16_GPU_2_vector(int* destination, const int* bara, int baraIndex, int nOutputs, int vLength, int bitSize, int N, int* source) { int id = blockIdx.x*blockDim.x+threadIdx.x; int length = nOutputs * vLength * bitSize * N; if(id < length) { int bitIndex = (id / N) % (bitSize *nOutputs * vLength); int startIndex = bitIndex * N; int a = bara[startIndex + baraIndex]; int threadIdModN = id % N; if (a < N) { if(threadIdModN < a) { destination[id] = -source[id - a + N] - source[id]; } __syncthreads(); if(threadIdModN >= a){ destination[id] = source[id - a] - source[id]; } __syncthreads(); } __syncthreads(); if ((a >= N)) { int aa = a - N; if(threadIdModN < aa) { destination[id] = source[id - aa + N] - source[id]; } __syncthreads(); if(threadIdModN >= aa) { destination[id] = -source[id - aa] - source[id]; } __syncthreads(); } } } EXPORT void torusPolynomialMulByXaiMinusOne_16(TorusPolynomial *result, const int *bara, int baraIndex, int bitSize, int N, const TorusPolynomial *source) { // cout << "new: "; // for (int i = startIndex; i < startIndex + 10 ; ++i) { // cout << source->coefsT[i] << " "; // } // cout << endl; // Torus32 *out = result->coefsT; int *out = result->coefsT; // Torus32 *in = source->coefsT; 
int *in = source->coefsT; int BLOCKSIZE = 1024; int gridSize = (int)ceil((float)(N*bitSize)/BLOCKSIZE); torusPolynomialMulByXaiMinusOne_16_GPU<<<gridSize, BLOCKSIZE>>>(out, bara, baraIndex, bitSize, N, in); // cudaDeviceSynchronize(); //input // int *temp_a = new int[N*bitSize]; // cudaMemcpy(temp_a, result->coefsT, N*bitSize* sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // for (int j = 0; j < 10; ++j) { // cout << temp_a[i * N + j] << " "; // } // cout << endl; // } // cout << endl; // static int counter = 0; // int bitIndex = 1; // int startIndex = bitIndex*N; // if (counter < 20) { // cout << "new input: "; // for (int i = 0; i < 10; ++i) { // cout << temp_a[startIndex + i] << " "; // } // cout << endl; // } // counter++; //output // int *temp_a = new int[N*bitSize]; // cudaMemcpy(temp_a, result->coefsT, N*bitSize* sizeof(int), cudaMemcpyDeviceToHost); // static int counter = 0; // int bitIndex = 1; // int startIndex = bitIndex*N; // if (counter < 20) { // cout << "new: "; // for (int i = 0; i < 10; ++i) { // cout << temp_a[startIndex + i] << " "; // } // cout << endl; // } // counter++; } EXPORT void torusPolynomialMulByXaiMinusOne_16_2(TorusPolynomial *result, const int *bara, int baraIndex, int nOutputs, int bitSize, int N, const TorusPolynomial *source) { int *out = result->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int length = nOutputs * bitSize * N; int gridSize = (int)ceil((float)(length)/BLOCKSIZE); // cout << "gridSize: " << gridSize << endl; torusPolynomialMulByXaiMinusOne_16_GPU_2<<<gridSize, BLOCKSIZE>>>(out, bara, baraIndex, nOutputs, bitSize, N, in); /* //input // int *temp_a = new int[N*bitSize]; // cudaMemcpy(temp_a, source->coefsT, N*bitSize* sizeof(int), cudaMemcpyDeviceToHost); // static int counter = 0; // int bitIndex = 1; // int startIndex = bitIndex*N; // if (counter < 20) { // cout << "new input: "; // for (int i = 0; i < 10; ++i) { // cout << temp_a[startIndex + i] << " "; // } // cout << 
endl; // } // counter++; //output // int *temp_a = new int[length]; // cudaMemcpy(temp_a, result->coefsT, length * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < nOutputs * bitSize; ++i) { // for (int j = 0; j < 10; ++j) { // cout << temp_a[i * N + j] << " "; // } // cout << endl; // } // static int counter = 0; // int bitIndex = 1; // int startIndex = bitIndex*N; // if (counter < 20) { // cout << "new: "; // for (int i = 0; i < 10; ++i) { // cout << temp_a[startIndex + i] << " "; // } // cout << endl; // } // counter++;*/ } EXPORT void torusPolynomialMulByXaiMinusOne_16_2v2(TorusPolynomial *resultV2, const int *bara, int baraIndex, int nOutputs, int bitSize, int N, const TorusPolynomial *source) { int *out = resultV2->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int k = 1; int length = nOutputs * bitSize * N * (k + 1); int gridSize = (int)ceil((float)(length)/BLOCKSIZE); torusPolynomialMulByXaiMinusOne_16_GPU_2v2<<<gridSize, BLOCKSIZE>>>(out, bara, baraIndex, nOutputs, bitSize, N, k, in); } EXPORT void torusPolynomialMulByXaiMinusOne_16_2_vector(TorusPolynomial *result, const int *bara, int baraIndex, int vLength, int nOutputs, int bitSize, int N, const TorusPolynomial *source) { int *out = result->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int length = vLength * nOutputs * bitSize * N; int gridSize = (int)ceil((float)(length)/BLOCKSIZE); torusPolynomialMulByXaiMinusOne_16_GPU_2_vector<<<gridSize, BLOCKSIZE>>>(out, bara, baraIndex, nOutputs, vLength, bitSize, N, in); } //result= X^{a}*source EXPORT void torusPolynomialMulByXai(TorusPolynomial *result, int a, const TorusPolynomial *source) { const int N = source->N; Torus32 *out = result->coefsT; Torus32 *in = source->coefsT; assert(a >= 0 && a < 2 * N); assert(result != source); if (a < N) { for (int i = 0; i < a; i++)//sur que i-a<0 out[i] = -in[i - a + N]; for (int i = a; i < N; i++)//sur que N>i-a>=0 out[i] = in[i - a]; } else { const int aa = a - N; for (int i = 0; i < aa; 
i++)//sur que i-a<0 out[i] = in[i - aa + N]; for (int i = aa; i < N; i++)//sur que N>i-a>=0 out[i] = -in[i - aa]; } //test morshed start // cout << "old: "; // for (int i = 0; i < 10; ++i) { // cout << out[i] << " "; // } // cout << endl; //test morshed end } /** * * @param destination * @param N * @param _2N * @param barb * @param bitSize * @param source * @return */ //new __global__ void torusPolynomialMulByXai_16_GPU(int *destination, int N, int _2N, const int *barb, int bitSize, int *source) { int id = blockIdx.x*blockDim.x+threadIdx.x; int bIndex = id / N; int baraIndex = id % N; if (id < (bitSize * N)) { // destination[id] = 1; int a = _2N - barb[bIndex]; if (a < N) { if (baraIndex < a) { destination[id] = -source[id - a + N]; } else { destination[id] = source[id - a]; } } else { const int aa = a - N; if (baraIndex < aa) { destination[id] = source[id - aa + N]; } else { destination[id] = -source[id - aa]; } } } } __global__ void torusPolynomialMulByXai_16_GPU_2(int *destination, int N, int _2N, const int *barb, int nOutputs, int bitSize, int *source) { int id = blockIdx.x*blockDim.x+threadIdx.x; int bIndex = id / N; int baraIndex = id % N; if (id < nOutputs * bitSize * N) { int a = _2N - barb[bIndex]; if (a < N) { if (baraIndex < a) { destination[id] = -source[id - a + N];//barb[bIndex];// } else { destination[id] = source[id - a];//barb[bIndex];// } } else { const int aa = a - N; if (baraIndex < aa) { destination[id] = source[id - aa + N];//barb[bIndex]; } else { destination[id] = -source[id - aa];//barb[bIndex] ;// } } } } EXPORT void torusPolynomialMulByXai_16(TorusPolynomial *result, int _2N, const int *barb, int bitSize, const TorusPolynomial *source) { const int N = source->N/bitSize; int *out = result->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int gridSize = (int)ceil((float)(N * bitSize)/BLOCKSIZE); int *barb_GPU; cudaMalloc(&barb_GPU, bitSize * sizeof(int)); cudaMemcpy(barb_GPU, barb, bitSize * sizeof(int), cudaMemcpyHostToDevice); 
torusPolynomialMulByXai_16_GPU<<<gridSize, BLOCKSIZE>>>(out, N, _2N, barb_GPU, bitSize, in); cudaFree(barb_GPU); // int *_tempBara = (int*)malloc(sizeof(int)*bitSize*N); // cudaMemcpy(_tempBara, out, bitSize * N * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize; ++i) { // int startIndex = i*N; // for (int j = 0; j < 10; ++j) { // cout << _tempBara[startIndex + j] << " "; // } // cout << endl; // } } EXPORT void torusPolynomialMulByXai_16_2(TorusPolynomial *result, int _2N, const int *barb, int nOutputs, int bitSize, const TorusPolynomial *source) { const int N = source->N/(bitSize * nOutputs); // 1024 int *out = result->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int length = nOutputs * bitSize * N; int gridSize = (int)ceil((float)(length)/BLOCKSIZE);//32 int *barb_GPU; cudaMalloc(&barb_GPU, nOutputs * bitSize * sizeof(int)); cudaMemcpy(barb_GPU, barb, nOutputs * bitSize * sizeof(int), cudaMemcpyHostToDevice); torusPolynomialMulByXai_16_GPU_2<<<gridSize, BLOCKSIZE>>>(out, N, _2N, barb_GPU, nOutputs, bitSize, in); cudaFree(barb_GPU); // int *_tempBara = new int[length]; // cudaMemcpy(_tempBara, out, length * sizeof(int), cudaMemcpyDeviceToHost); // for (int i = 0; i < bitSize * nOutputs; ++i) { // int startIndex = i * N; //// cout << "new: "; // for (int j = 0; j < 10; ++j) { // cout << _tempBara[startIndex + j] << " "; // } // cout << endl; // } } EXPORT void torusPolynomialMulByXai_16_2_vector(TorusPolynomial *result, int _2N, const int *barb, int vLength, int nOutputs, int bitSize, const TorusPolynomial *source) { // cout << "torusPolynomialMulByXai_16_2_vector" << endl; // cout << "vLength: " << vLength << " nOutputs: " << nOutputs << " bitSize: " << bitSize << endl; const int N = source->N/(bitSize * nOutputs * vLength); // 1024 int *out = result->coefsT; int *in = source->coefsT; int BLOCKSIZE = 1024; int length = vLength * nOutputs * bitSize * N; int gridSize = (int)ceil((float)(length)/BLOCKSIZE); // cout << "gridSize: " << 
gridSize << endl; int *barb_GPU; cudaMalloc(&barb_GPU, vLength * nOutputs * bitSize * sizeof(int)); cudaMemcpy(barb_GPU, barb, vLength * nOutputs * bitSize * sizeof(int), cudaMemcpyHostToDevice); torusPolynomialMulByXai_16_GPU_2<<<gridSize, BLOCKSIZE>>>(out, N, _2N, barb_GPU, nOutputs, bitSize * vLength, in); cudaFree(barb_GPU); } // TorusPolynomial -= p*TorusPolynomial EXPORT void torusPolynomialSubMulZTo(TorusPolynomial *result, int p, const TorusPolynomial *poly2) { const int N = poly2->N; Torus32 *r = result->coefsT; const Torus32 *b = poly2->coefsT; for (int i = 0; i < N; ++i) r[i] -= p * b[i]; } // Norme Euclidienne d'un IntPolynomial EXPORT double intPolynomialNormSq2(const IntPolynomial *poly) { const int N = poly->N; int temp1 = 0; for (int i = 0; i < N; ++i) { int temp0 = poly->coefs[i] * poly->coefs[i]; temp1 += temp0; } return temp1; } // Sets to zero EXPORT void intPolynomialClear(IntPolynomial *poly) { const int N = poly->N; for (int i = 0; i < N; ++i) poly->coefs[i] = 0; } // Sets to zero EXPORT void intPolynomialCopy(IntPolynomial *result, const IntPolynomial *source) { const int N = source->N; for (int i = 0; i < N; ++i) result->coefs[i] = source->coefs[i]; } /** accum += source */ EXPORT void intPolynomialAddTo(IntPolynomial *accum, const IntPolynomial *source) { const int N = source->N; for (int i = 0; i < N; ++i) accum->coefs[i] += source->coefs[i]; } /** result = (X^ai-1) * source */ EXPORT void intPolynomialMulByXaiMinusOne(IntPolynomial *result, int ai, const IntPolynomial *source) { const int N = source->N; int *out = result->coefs; int *in = source->coefs; assert(ai >= 0 && ai < 2 * N); if (ai < N) { for (int i = 0; i < ai; i++)//sur que i-a<0 out[i] = -in[i - ai + N] - in[i]; for (int i = ai; i < N; i++)//sur que N>i-a>=0 out[i] = in[i - ai] - in[i]; } else { const int aa = ai - N; for (int i = 0; i < aa; i++)//sur que i-a<0 out[i] = in[i - aa + N] - in[i]; for (int i = aa; i < N; i++)//sur que N>i-a>=0 out[i] = -in[i - aa] - in[i]; } } // 
Norme infini de la distance entre deux TorusPolynomial EXPORT double torusPolynomialNormInftyDist(const TorusPolynomial *poly1, const TorusPolynomial *poly2) { const int N = poly1->N; double norm = 0; // Max between the coefficients of abs(poly1-poly2) for (int i = 0; i < N; ++i) { double r = abs(t32tod(poly1->coefsT[i] - poly2->coefsT[i])); if (r > norm) { norm = r; } } return norm; } // Norme 2 d'un IntPolynomial EXPORT double intPolynomialNorm2sq(const IntPolynomial *poly) { const int N = poly->N; double norm = 0; for (int i = 0; i < N; ++i) { double r = poly->coefs[i]; norm += r * r; } return norm; } // Norme infini de la distance entre deux IntPolynomial EXPORT double intPolynomialNormInftyDist(const IntPolynomial *poly1, const IntPolynomial *poly2) { const int N = poly1->N; double norm = 0; // Max between the coefficients of abs(poly1-poly2) for (int i = 0; i < N; ++i) { double r = abs(poly1->coefs[i] - poly2->coefs[i]); if (r > norm) { norm = r; } } return norm; }
92db4ebec0b1b111fbc4e9a32ffd6220ea292801.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zgedensereimsplit.cu, normal z -> c, Sun Nov 20 20:20:41 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void cgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaFloatComplex * A, magmaFloatComplex * ReA, magmaFloatComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_C_MAKE( MAGMA_C_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_C_MAKE( MAGMA_C_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_c_matrix input matrix A. @param[out] ReA magma_c_matrix* output matrix contaning real contributions. @param[out] ImA magma_c_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgedensereimsplit( magma_c_matrix A, magma_c_matrix *ReA, magma_c_matrix *ImA, magma_queue_t queue ) { magma_cmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_cmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( cgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
92db4ebec0b1b111fbc4e9a32ffd6220ea292801.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zgedensereimsplit.cu, normal z -> c, Sun Nov 20 20:20:41 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // axpy kernel for matrices stored in the MAGMA format __global__ void cgedensereimsplit_kernel( int num_rows, int num_cols, magma_index_t* rowidx, magmaFloatComplex * A, magmaFloatComplex * ReA, magmaFloatComplex * ImA ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if( row<num_rows ){ for( j=0; j<num_cols; j++ ){ ReA[ j ] = MAGMA_C_MAKE( MAGMA_C_REAL( A[ j ] ), 0.0 ); ImA[ j ] = MAGMA_C_MAKE( MAGMA_C_IMAG( A[ j ] ), 0.0 ); } } } /** Purpose ------- This routine takes an input matrix A in DENSE format and located on the GPU and splits it into two matrixes ReA and ImA containing the real and the imaginary contributions of A. The output matrices are allocated within the routine. Arguments --------- @param[in] A magma_c_matrix input matrix A. @param[out] ReA magma_c_matrix* output matrix contaning real contributions. @param[out] ImA magma_c_matrix* output matrix contaning complex contributions. @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgedensereimsplit( magma_c_matrix A, magma_c_matrix *ReA, magma_c_matrix *ImA, magma_queue_t queue ) { magma_cmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue ); magma_cmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue ); int m = A.num_rows; int n = A.num_cols; dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; cgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, A.row, A.dval, ReA->dval, ImA->dval ); return MAGMA_SUCCESS; }
31b0cedaa13bb10d73d9163137b4a3f309e4614d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/Context.h> #include "ATen/hip/HIPContext.h" #define CUDA_NUM_THREADS 512 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) template <typename scalar_t> __global__ void kernel_resample2d_update_output(const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t val = 0.0f; int dim_b = DIM0(output_size); int dim_c = DIM1(output_size); int dim_h = DIM2(output_size); int dim_w = DIM3(output_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - floor(xf); // alpha scalar_t beta = yf - floor(yf); // beta int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { val += static_cast<float>((1. - alpha)*(1. 
- beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx)); val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx)); val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx)); val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx)); } } output[index] = val; } template <typename scalar_t> __global__ void kernel_resample2d_backward_input1( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int dim_b = DIM0(gradOutput_size); int dim_c = DIM1(gradOutput_size); int dim_h = DIM2(gradOutput_size); int dim_w = DIM3(gradOutput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - int(xf); // alpha scalar_t beta = yf - int(yf); // beta int idim_h = DIM2(input1_size); int idim_w = DIM3(input1_size); int xL = max(min( int (floor(xf)), idim_w-1), 0); int xR = max(min( int (floor(xf)+1), idim_w -1), 0); int yT = max(min( int (floor(yf)), idim_h-1), 0); int yB = max(min( int (floor(yf)+1), idim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); 
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); } } } template <typename scalar_t> __global__ void kernel_resample2d_backward_input2( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t output = 0.0; int kernel_rad = (kernel_size - 1)/2; int dim_b = DIM0(gradInput_size); int dim_c = DIM1(gradInput_size); int dim_h = DIM2(gradInput_size); int dim_w = DIM3(gradInput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; int odim_c = DIM1(gradOutput_size); scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); if (c % 2) { float gamma = 1 - (xf - floor(xf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, 
(yB + j), (xL + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); } } } } else { float gamma = 1 - (yf - floor(yf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); } } } } gradInput[index] = output; } void resample2d_kernel_forward( at::Tensor& input1, at::Tensor& input2, at::Tensor& output, int kernel_size) { int n = output.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); // TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] { hipLaunchKernelGGL(( kernel_resample2d_update_output<float>), dim3((n + 
CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, output.data<float>(), output_size, output_stride, kernel_size); // })); // TODO: ATen-equivalent check // THCudaCheck(hipGetLastError()); } void resample2d_kernel_backward( at::Tensor& input1, at::Tensor& input2, at::Tensor& gradOutput, at::Tensor& gradInput1, at::Tensor& gradInput2, int kernel_size) { int n = gradOutput.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3)); const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3)); const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3)); const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3)); // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] { hipLaunchKernelGGL(( kernel_resample2d_backward_input1<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput1.data<float>(), gradInput1_size, gradInput1_stride, 
kernel_size ); // })); const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3)); const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3)); n = gradInput2.numel(); // AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] { hipLaunchKernelGGL(( kernel_resample2d_backward_input2<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput2.data<float>(), gradInput2_size, gradInput2_stride, kernel_size ); // })); // TODO: Use the ATen equivalent to get last error // THCudaCheck(hipGetLastError()); }
31b0cedaa13bb10d73d9163137b4a3f309e4614d.cu
#include <ATen/ATen.h> #include <ATen/Context.h> #include "ATen/cuda/CUDAContext.h" #define CUDA_NUM_THREADS 512 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) template <typename scalar_t> __global__ void kernel_resample2d_update_output(const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t val = 0.0f; int dim_b = DIM0(output_size); int dim_c = DIM1(output_size); int dim_h = DIM2(output_size); int dim_w = DIM3(output_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - floor(xf); // alpha scalar_t beta = yf - floor(yf); // beta int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx)); val += static_cast<float>((alpha)*(1. 
- beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx)); val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx)); val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx)); } } output[index] = val; } template <typename scalar_t> __global__ void kernel_resample2d_backward_input1( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int dim_b = DIM0(gradOutput_size); int dim_c = DIM1(gradOutput_size); int dim_h = DIM2(gradOutput_size); int dim_w = DIM3(gradOutput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; scalar_t alpha = xf - int(xf); // alpha scalar_t beta = yf - int(yf); // beta int idim_h = DIM2(input1_size); int idim_w = DIM3(input1_size); int xL = max(min( int (floor(xf)), idim_w-1), 0); int xR = max(min( int (floor(xf)+1), idim_w -1), 0); int yT = max(min( int (floor(yf)), idim_h-1), 0); int yB = max(min( int (floor(yf)+1), idim_h-1), 0); for (int fy = 0; fy < kernel_size; fy += 1) { for (int fx = 0; fx < kernel_size; fx += 1) { atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * 
DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); } } } template <typename scalar_t> __global__ void kernel_resample2d_backward_input2( const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride, const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } scalar_t output = 0.0; int kernel_rad = (kernel_size - 1)/2; int dim_b = DIM0(gradInput_size); int dim_c = DIM1(gradInput_size); int dim_h = DIM2(gradInput_size); int dim_w = DIM3(gradInput_size); int dim_chw = dim_c * dim_h * dim_w; int dim_hw = dim_h * dim_w; int b = ( index / dim_chw ) % dim_b; int c = ( index / dim_hw ) % dim_c; int y = ( index / dim_w ) % dim_h; int x = ( index ) % dim_w; int odim_c = DIM1(gradOutput_size); scalar_t dx = DIM3_INDEX(input2, b, 0, y, x); scalar_t dy = DIM3_INDEX(input2, b, 1, y, x); scalar_t xf = static_cast<scalar_t>(x) + dx; scalar_t yf = static_cast<scalar_t>(y) + dy; int xL = max(min( int (floor(xf)), dim_w-1), 0); int xR = max(min( int (floor(xf)+1), dim_w -1), 0); int yT = max(min( int (floor(yf)), dim_h-1), 0); int yB = max(min( int (floor(yf)+1), dim_h-1), 0); if (c % 2) { float gamma = 1 - (xf - floor(xf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * 
DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); } } } } else { float gamma = 1 - (yf - floor(yf)); // alpha for (int i = 0; i <= 2*kernel_rad; ++i) { for (int j = 0; j <= 2*kernel_rad; ++j) { for (int ch = 0; ch < odim_c; ++ch) { output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); } } } } gradInput[index] = output; } void resample2d_kernel_forward( at::Tensor& input1, at::Tensor& input2, at::Tensor& output, int kernel_size) { int n = output.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); // TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] { kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, 
input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, output.data<float>(), output_size, output_stride, kernel_size); // })); // TODO: ATen-equivalent check // THCudaCheck(cudaGetLastError()); } void resample2d_kernel_backward( at::Tensor& input1, at::Tensor& input2, at::Tensor& gradOutput, at::Tensor& gradInput1, at::Tensor& gradInput2, int kernel_size) { int n = gradOutput.numel(); const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3)); const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3)); const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3)); const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3)); const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3)); const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3)); // AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] { kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput1.data<float>(), gradInput1_size, gradInput1_stride, kernel_size ); // })); const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3)); const long4 gradInput2_stride 
= make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3)); n = gradInput2.numel(); // AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] { kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( n, input1.data<float>(), input1_size, input1_stride, input2.data<float>(), input2_size, input2_stride, gradOutput.data<float>(), gradOutput_size, gradOutput_stride, gradInput2.data<float>(), gradInput2_size, gradInput2_stride, kernel_size ); // })); // TODO: Use the ATen equivalent to get last error // THCudaCheck(cudaGetLastError()); }
e2ab54e0ff4c1a56584c436458b36a99c6d80a2f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <reduce.h> __device__ float merge(float old,float opOutput,float *extraParams) { return opOutput + old; } __device__ float update(float old,float opOutput,float *extraParams) { return opOutput + old; } /** An op on the device @param d1 the first operator @param d2 the second operator */ __device__ float op(float d1,float d2,float *extraParams) { return op(d1,extraParams); } //an op for the kernel __device__ float op(float d1,float *extraParams) { float mean = extraParams[0]; float curr = (d1 - mean); return curr; } //post process result (for things like means etc) __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return reduction; } extern "C" __global__ void bias_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
e2ab54e0ff4c1a56584c436458b36a99c6d80a2f.cu
#include <reduce.h> __device__ float merge(float old,float opOutput,float *extraParams) { return opOutput + old; } __device__ float update(float old,float opOutput,float *extraParams) { return opOutput + old; } /** An op on the device @param d1 the first operator @param d2 the second operator */ __device__ float op(float d1,float d2,float *extraParams) { return op(d1,extraParams); } //an op for the kernel __device__ float op(float d1,float *extraParams) { float mean = extraParams[0]; float curr = (d1 - mean); return curr; } //post process result (for things like means etc) __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return reduction; } extern "C" __global__ void bias_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
ef93604b851434ed5cad21f0ba5271b99f0e05a0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "awkward_ByteMaskedArray_getitem_nextcarry_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int64_t *prefixed_mask = NULL; hipMalloc(&prefixed_mask, XSIZE*YSIZE); int64_t *to_carry = NULL; hipMalloc(&to_carry, XSIZE*YSIZE); int8_t *mask = NULL; hipMalloc(&mask, XSIZE*YSIZE); int64_t length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( awkward_ByteMaskedArray_getitem_nextcarry_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, prefixed_mask,to_carry,mask,length); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( awkward_ByteMaskedArray_getitem_nextcarry_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, prefixed_mask,to_carry,mask,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( 
awkward_ByteMaskedArray_getitem_nextcarry_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, prefixed_mask,to_carry,mask,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ef93604b851434ed5cad21f0ba5271b99f0e05a0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "awkward_ByteMaskedArray_getitem_nextcarry_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int64_t *prefixed_mask = NULL; cudaMalloc(&prefixed_mask, XSIZE*YSIZE); int64_t *to_carry = NULL; cudaMalloc(&to_carry, XSIZE*YSIZE); int8_t *mask = NULL; cudaMalloc(&mask, XSIZE*YSIZE); int64_t length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); awkward_ByteMaskedArray_getitem_nextcarry_kernel<<<gridBlock,threadBlock>>>(prefixed_mask,to_carry,mask,length); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { awkward_ByteMaskedArray_getitem_nextcarry_kernel<<<gridBlock,threadBlock>>>(prefixed_mask,to_carry,mask,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { awkward_ByteMaskedArray_getitem_nextcarry_kernel<<<gridBlock,threadBlock>>>(prefixed_mask,to_carry,mask,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, 
microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7d04e0163cba6ebff10329b03c43eb2092ac0abf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define NELEMS(x) (sizeof(x) / sizeof((x)[0])) #include <stdio.h> #include <stdlib.h> #include <string.h> #include <iostream> #include <string> using namespace std; #define SIZE 32 #ifndef PINNED #define PINNED 0 #endif // definicin de un pixel con sus tres canales de color (R,G,B) struct pixel_int_t { int r, g, b; }; // funcion auxiliar para importar una imagen unsigned char *LOAD(const string &imageName, int *width, int *height, int *comp, int desiredeviceNewImagehannels) { string imagePath = imageName; char path[imagePath.length() + 1]; strcpy(path, imagePath.c_str()); return stbi_load(path, width, height, comp, desiredeviceNewImagehannels); } // funcion auxiliar para guardar una imagen (png) void WRITEPNG(const string &imageName, int width, int height, int comp, const void *data, int quality) { string imagePath = imageName + ".png"; char path2[imagePath.length() + 1]; strcpy(path2, imagePath.c_str()); stbi_write_png(path2, width, height, comp, data, width * sizeof(char) * 3); } // Transforma una imagen almacenada en un vector de char // a una almacenada en una matriz de pixels void transformImage(const unsigned char *image, int width, int height, pixel_int_t *ret) { for (int i = 0; i < height; ++i) { for (int j = 0; j < width; j++) { int jj = j * 3; ret[i * width + j].r = image[i * width * 3 + jj]; ret[i * width + j].g = image[i * width * 3 + jj + 1]; ret[i * width + j].b = image[i * width * 3 + jj + 2]; } } } __global__ void Kernel00(int width, int height, pixel_int_t *original, unsigned char *new_image) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __const__ int kernel[5][5] = {{1, 4, 7, 4, 1}, {4, 16, 26, 16, 4}, {7, 26, 41, 26, 7}, {4, 16, 26, 16, 4}, {1, 4, 7, 4, 1}}; int kernel_value = 273; // 
Para cada pixel de la imagen calculamos la submatriz de pxeles que lo rodea // Y obtenemos el resultado del producto ponderado de dicha submatriz por el kernel pixel_int_t sumX{}, ans{}; sumX = ans = {.r=0, .g=0, .b=0}; int r, c; int margin_x = NELEMS(kernel) / 2; int margin_y = NELEMS(kernel[0]) / 2; for (int i = -margin_x; i < (margin_x + 1); i++) { for (int j = -margin_y; j < (margin_y + 1); j++) { r = row + i; c = col + j; r = min(max(0, r), height - 1); c = min(max(0, c), width - 1); pixel_int_t pixel = {.r=0, .g=0, .b=0}; pixel = original[r * width + c]; sumX.r += pixel.r * kernel[i + margin_x][j + margin_y]; sumX.g += pixel.g * kernel[i + margin_x][j + margin_y]; sumX.b += pixel.b * kernel[i + margin_x][j + margin_y]; } } ans.r = abs(sumX.r) / kernel_value; ans.g = abs(sumX.g) / kernel_value; ans.b = abs(sumX.b) / kernel_value; // Para evitar pequeos errores: if (ans.r > 255) ans.r = 255; if (ans.g > 255) ans.g = 255; if (ans.b > 255) ans.b = 255; if (ans.r < 0) ans.r = 0; if (ans.g < 0) ans.g = 0; if (ans.b < 0) ans.b = 0; // Una vez tenemos el valor del pixel borroso lo almacenamos en la imagen resultante new_image[row * (width * 3) + col * 3] = (unsigned char) ans.r; new_image[row * (width * 3) + col * 3 + 1] = (unsigned char) ans.g; new_image[row * (width * 3) + col * 3 + 2] = (unsigned char) ans.b; } int main(int argc, char **argv) { int width, height, comp; unsigned int numBytesA, numBytesC; unsigned int nBlocks, nThreads; float single_time, half_time, total_time; hipEvent_t E0, E1, E2, E3; string imageName, resultName; // 'blurring_times' es la cantidad de iteraciones que har el algoritmo // si no lo recibimos como parmetro le asignamos 10 por defecto int BLURRING_TIMES = 10; if (argc == 1) { imageName = "fruits.png"; resultName = "result"; } else if (argc == 2) { imageName = argv[1]; resultName = "result"; } else if (argc == 3) { imageName = argv[1]; resultName = argv[2]; } else if (argc == 4) { imageName = argv[1]; resultName = argv[2]; 
BLURRING_TIMES = atoi(argv[3]); } else { printf("Usage: ./exe IMAGENAME RESULTNAME BLURRING_TIMES\n"); exit(0); } // Cargamos la imagen que vamos a utilizar unsigned char *image = LOAD(imageName, &width, &height, &comp, STBI_rgb); if (image == nullptr) { throw std::runtime_error("ERROR loading: " + imageName); } numBytesA = sizeof(pixel_int_t) * width * height; numBytesC = sizeof(unsigned char) * height * width * 3; // Reservamos el espacio de memoria que ocupar la imagen resultante auto *new_image = (unsigned char *) malloc(numBytesC); if (new_image == nullptr) { throw std::runtime_error("Error in malloc.\n"); } // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension nBlocks = width / nThreads; dim3 dimGrid(nBlocks, nBlocks, 1); dim3 dimBlock(nThreads, nThreads, 1); hipEventCreate(&E0); hipEventCreate(&E1); hipEventCreate(&E2); hipEventCreate(&E3); // Obtener Memoria en el device pixel_int_t *deviceOriginal; unsigned char *deviceNewImage; hipMalloc((pixel_int_t **) &deviceOriginal, numBytesA); hipMalloc((unsigned char **) &deviceNewImage, numBytesC); hipEventRecord(E0, 0); hipEventSynchronize(E0); pixel_int_t *original = new pixel_int_t[width * height]; for (int i = 0; i < BLURRING_TIMES; i++) { if (i > 0) { // Transformamos la imagen de entrada en una matriz de pxeles transformImage(new_image, width, height, original); } else { // Si queremos seguir iterando necesitamos convertir la imagen resultante a una matriz de pxeles transformImage(image, width, height, original); } // Copiar datos desde el host en el device hipMemcpy(deviceOriginal, original, numBytesA, hipMemcpyHostToDevice); // Llamada al kernel Kernel00 << < dimGrid, dimBlock >> > (width, height, deviceOriginal, deviceNewImage); // Obtener el resultado desde el host hipMemcpy(new_image, deviceNewImage, numBytesC, hipMemcpyDeviceToHost); if (i + 1 == 1) { hipEventRecord(E1, 0); hipEventSynchronize(E1); } else if (i + 1 == BLURRING_TIMES / 2) { hipEventRecord(E2, 0); 
hipEventSynchronize(E2); } else if (i + 1 == BLURRING_TIMES) { hipEventRecord(E3, 0); hipEventSynchronize(E3); } } // Liberar Memoria del device hipFree(deviceOriginal); hipFree(deviceNewImage); hipEventElapsedTime(&single_time, E0, E1); hipEventElapsedTime(&half_time, E0, E2); hipEventElapsedTime(&total_time, E0, E3); // Guardamos la imagen resultante WRITEPNG(resultName, width, height, STBI_rgb, new_image, 255); // Libreamos memoria free(image); free(original); free(new_image); printf("\nKERNEL con imagen %dx%d\n", width, height); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocks, nBlocks, nBlocks * nBlocks); if (PINNED) printf("Usando Pinned Memory\n"); else printf("NO usa Pinned Memory\n"); printf("Tiempo 1 iteracion: %4.6f milseg\n", single_time); printf("Tiempo %d iteraciones: %4.6f milseg\n", BLURRING_TIMES / 2, half_time); printf("Tiempo %d iteraciones: %4.6f milseg\n", BLURRING_TIMES, total_time); hipEventDestroy(E0); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3); }
7d04e0163cba6ebff10329b03c43eb2092ac0abf.cu
#define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define NELEMS(x) (sizeof(x) / sizeof((x)[0])) #include <stdio.h> #include <stdlib.h> #include <string.h> #include <iostream> #include <string> using namespace std; #define SIZE 32 #ifndef PINNED #define PINNED 0 #endif // definición de un pixel con sus tres canales de color (R,G,B) struct pixel_int_t { int r, g, b; }; // funcion auxiliar para importar una imagen unsigned char *LOAD(const string &imageName, int *width, int *height, int *comp, int desiredeviceNewImagehannels) { string imagePath = imageName; char path[imagePath.length() + 1]; strcpy(path, imagePath.c_str()); return stbi_load(path, width, height, comp, desiredeviceNewImagehannels); } // funcion auxiliar para guardar una imagen (png) void WRITEPNG(const string &imageName, int width, int height, int comp, const void *data, int quality) { string imagePath = imageName + ".png"; char path2[imagePath.length() + 1]; strcpy(path2, imagePath.c_str()); stbi_write_png(path2, width, height, comp, data, width * sizeof(char) * 3); } // Transforma una imagen almacenada en un vector de char // a una almacenada en una matriz de pixels void transformImage(const unsigned char *image, int width, int height, pixel_int_t *ret) { for (int i = 0; i < height; ++i) { for (int j = 0; j < width; j++) { int jj = j * 3; ret[i * width + j].r = image[i * width * 3 + jj]; ret[i * width + j].g = image[i * width * 3 + jj + 1]; ret[i * width + j].b = image[i * width * 3 + jj + 2]; } } } __global__ void Kernel00(int width, int height, pixel_int_t *original, unsigned char *new_image) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; __const__ int kernel[5][5] = {{1, 4, 7, 4, 1}, {4, 16, 26, 16, 4}, {7, 26, 41, 26, 7}, {4, 16, 26, 16, 4}, {1, 4, 7, 4, 1}}; int kernel_value = 273; // Para cada pixel de la imagen calculamos la submatriz de píxeles que lo rodea // Y 
obtenemos el resultado del producto ponderado de dicha submatriz por el kernel pixel_int_t sumX{}, ans{}; sumX = ans = {.r=0, .g=0, .b=0}; int r, c; int margin_x = NELEMS(kernel) / 2; int margin_y = NELEMS(kernel[0]) / 2; for (int i = -margin_x; i < (margin_x + 1); i++) { for (int j = -margin_y; j < (margin_y + 1); j++) { r = row + i; c = col + j; r = min(max(0, r), height - 1); c = min(max(0, c), width - 1); pixel_int_t pixel = {.r=0, .g=0, .b=0}; pixel = original[r * width + c]; sumX.r += pixel.r * kernel[i + margin_x][j + margin_y]; sumX.g += pixel.g * kernel[i + margin_x][j + margin_y]; sumX.b += pixel.b * kernel[i + margin_x][j + margin_y]; } } ans.r = abs(sumX.r) / kernel_value; ans.g = abs(sumX.g) / kernel_value; ans.b = abs(sumX.b) / kernel_value; // Para evitar pequeños errores: if (ans.r > 255) ans.r = 255; if (ans.g > 255) ans.g = 255; if (ans.b > 255) ans.b = 255; if (ans.r < 0) ans.r = 0; if (ans.g < 0) ans.g = 0; if (ans.b < 0) ans.b = 0; // Una vez tenemos el valor del pixel borroso lo almacenamos en la imagen resultante new_image[row * (width * 3) + col * 3] = (unsigned char) ans.r; new_image[row * (width * 3) + col * 3 + 1] = (unsigned char) ans.g; new_image[row * (width * 3) + col * 3 + 2] = (unsigned char) ans.b; } int main(int argc, char **argv) { int width, height, comp; unsigned int numBytesA, numBytesC; unsigned int nBlocks, nThreads; float single_time, half_time, total_time; cudaEvent_t E0, E1, E2, E3; string imageName, resultName; // 'blurring_times' es la cantidad de iteraciones que hará el algoritmo // si no lo recibimos como parámetro le asignamos 10 por defecto int BLURRING_TIMES = 10; if (argc == 1) { imageName = "fruits.png"; resultName = "result"; } else if (argc == 2) { imageName = argv[1]; resultName = "result"; } else if (argc == 3) { imageName = argv[1]; resultName = argv[2]; } else if (argc == 4) { imageName = argv[1]; resultName = argv[2]; BLURRING_TIMES = atoi(argv[3]); } else { printf("Usage: ./exe IMAGENAME RESULTNAME 
BLURRING_TIMES\n"); exit(0); } // Cargamos la imagen que vamos a utilizar unsigned char *image = LOAD(imageName, &width, &height, &comp, STBI_rgb); if (image == nullptr) { throw std::runtime_error("ERROR loading: " + imageName); } numBytesA = sizeof(pixel_int_t) * width * height; numBytesC = sizeof(unsigned char) * height * width * 3; // Reservamos el espacio de memoria que ocupará la imagen resultante auto *new_image = (unsigned char *) malloc(numBytesC); if (new_image == nullptr) { throw std::runtime_error("Error in malloc.\n"); } // numero de Threads en cada dimension nThreads = SIZE; // numero de Blocks en cada dimension nBlocks = width / nThreads; dim3 dimGrid(nBlocks, nBlocks, 1); dim3 dimBlock(nThreads, nThreads, 1); cudaEventCreate(&E0); cudaEventCreate(&E1); cudaEventCreate(&E2); cudaEventCreate(&E3); // Obtener Memoria en el device pixel_int_t *deviceOriginal; unsigned char *deviceNewImage; cudaMalloc((pixel_int_t **) &deviceOriginal, numBytesA); cudaMalloc((unsigned char **) &deviceNewImage, numBytesC); cudaEventRecord(E0, 0); cudaEventSynchronize(E0); pixel_int_t *original = new pixel_int_t[width * height]; for (int i = 0; i < BLURRING_TIMES; i++) { if (i > 0) { // Transformamos la imagen de entrada en una matriz de píxeles transformImage(new_image, width, height, original); } else { // Si queremos seguir iterando necesitamos convertir la imagen resultante a una matriz de píxeles transformImage(image, width, height, original); } // Copiar datos desde el host en el device cudaMemcpy(deviceOriginal, original, numBytesA, cudaMemcpyHostToDevice); // Llamada al kernel Kernel00 << < dimGrid, dimBlock >> > (width, height, deviceOriginal, deviceNewImage); // Obtener el resultado desde el host cudaMemcpy(new_image, deviceNewImage, numBytesC, cudaMemcpyDeviceToHost); if (i + 1 == 1) { cudaEventRecord(E1, 0); cudaEventSynchronize(E1); } else if (i + 1 == BLURRING_TIMES / 2) { cudaEventRecord(E2, 0); cudaEventSynchronize(E2); } else if (i + 1 == BLURRING_TIMES) { 
cudaEventRecord(E3, 0); cudaEventSynchronize(E3); } } // Liberar Memoria del device cudaFree(deviceOriginal); cudaFree(deviceNewImage); cudaEventElapsedTime(&single_time, E0, E1); cudaEventElapsedTime(&half_time, E0, E2); cudaEventElapsedTime(&total_time, E0, E3); // Guardamos la imagen resultante WRITEPNG(resultName, width, height, STBI_rgb, new_image, 255); // Libreamos memoria free(image); free(original); free(new_image); printf("\nKERNEL con imagen %dx%d\n", width, height); printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); printf("nBlocks: %dx%d (%d)\n", nBlocks, nBlocks, nBlocks * nBlocks); if (PINNED) printf("Usando Pinned Memory\n"); else printf("NO usa Pinned Memory\n"); printf("Tiempo 1 iteracion: %4.6f milseg\n", single_time); printf("Tiempo %d iteraciones: %4.6f milseg\n", BLURRING_TIMES / 2, half_time); printf("Tiempo %d iteraciones: %4.6f milseg\n", BLURRING_TIMES, total_time); cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); }
0455de8292080ef068e27ae01efa6f3309b67e83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2002-2020 the Network-Based Computing Laboratory * (NBCL), The Ohio State University. * * Contact: Dr. D. K. Panda (panda@cse.ohio-state.edu) * * For detailed copyright and licensing information, please refer to the * copyright file COPYRIGHT in the top level OMB directory. */ __global__ void compute_kernel(float a, float * x, float * y, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int count = 0; if (i < N) { for(count=0; count < (N/8); count++) { y[i] = a * x[i] + y[i]; } } } extern "C" void call_kernel(float a, float * d_x, float * d_y, int N, hipStream_t * stream) { hipLaunchKernelGGL(( compute_kernel), dim3((N+255)/256), dim3(256), 0, *stream, a, d_x, d_y, N); }
0455de8292080ef068e27ae01efa6f3309b67e83.cu
/* * Copyright (C) 2002-2020 the Network-Based Computing Laboratory * (NBCL), The Ohio State University. * * Contact: Dr. D. K. Panda (panda@cse.ohio-state.edu) * * For detailed copyright and licensing information, please refer to the * copyright file COPYRIGHT in the top level OMB directory. */ __global__ void compute_kernel(float a, float * x, float * y, int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; int count = 0; if (i < N) { for(count=0; count < (N/8); count++) { y[i] = a * x[i] + y[i]; } } } extern "C" void call_kernel(float a, float * d_x, float * d_y, int N, cudaStream_t * stream) { compute_kernel<<<(N+255)/256, 256, 0, *stream>>>(a, d_x, d_y, N); }
4cf7978d7100b5d75666886918e46786a118266e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /************************/ /* TEST KERNEL FUNCTION */ /************************/ __global__ void distance(int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } /********/ /* MAIN */ /********/ int main(int ac, const char** av) { const int N = 1000000; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size int* h_vec1 = (int*) malloc(N*sizeof(int)); int* h_vec2 = (int*) malloc(N*sizeof(int)); int* h_vec3 = (int*) malloc(N*sizeof(int)); int* h_vec4 = (int*) malloc(N*sizeof(int)); int* d_vec1; hipMalloc((void**)&d_vec1, N*sizeof(int)); int* d_vec2; hipMalloc((void**)&d_vec2, N*sizeof(int)); int* d_vec3; hipMalloc((void**)&d_vec3, N*sizeof(int)); for (int i=0; i<N; i++) { h_vec1[i] = 10; h_vec2[i] = 20; h_vec4[i] = h_vec1[i] + h_vec2[i]; } hipMemcpy(d_vec1, h_vec1, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_vec2, h_vec2, N*sizeof(int), hipMemcpyHostToDevice); float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, distance, 0, N); // Round up according to array size gridSize = (N + blockSize - 1) / blockSize; hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Occupancy calculator elapsed time: %3.3f ms \n", time); hipEventRecord(start, 0); hipLaunchKernelGGL(( distance), dim3(gridSize), dim3(blockSize), 0, 0, d_vec1, d_vec2, d_vec3, N); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Kernel elapsed time: %3.3f ms \n", time); printf("Blocksize %i\n", blockSize); hipMemcpy(h_vec3, d_vec3, 
N*sizeof(int), hipMemcpyDeviceToHost); for (int i=0; i<N; i++) { if (h_vec3[i] != h_vec4[i]) { printf("Error at i = %i! Host = %i; Device = %i\n", i, h_vec4[i], h_vec3[i]); return 1; }; } printf("Test passed\n"); return 0; }
4cf7978d7100b5d75666886918e46786a118266e.cu
#include <stdio.h> /************************/ /* TEST KERNEL FUNCTION */ /************************/ __global__ void distance(int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } /********/ /* MAIN */ /********/ int main(int ac, const char** av) { const int N = 1000000; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size int* h_vec1 = (int*) malloc(N*sizeof(int)); int* h_vec2 = (int*) malloc(N*sizeof(int)); int* h_vec3 = (int*) malloc(N*sizeof(int)); int* h_vec4 = (int*) malloc(N*sizeof(int)); int* d_vec1; cudaMalloc((void**)&d_vec1, N*sizeof(int)); int* d_vec2; cudaMalloc((void**)&d_vec2, N*sizeof(int)); int* d_vec3; cudaMalloc((void**)&d_vec3, N*sizeof(int)); for (int i=0; i<N; i++) { h_vec1[i] = 10; h_vec2[i] = 20; h_vec4[i] = h_vec1[i] + h_vec2[i]; } cudaMemcpy(d_vec1, h_vec1, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_vec2, h_vec2, N*sizeof(int), cudaMemcpyHostToDevice); float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, distance, 0, N); // Round up according to array size gridSize = (N + blockSize - 1) / blockSize; cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Occupancy calculator elapsed time: %3.3f ms \n", time); cudaEventRecord(start, 0); distance<<<gridSize, blockSize>>>(d_vec1, d_vec2, d_vec3, N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Kernel elapsed time: %3.3f ms \n", time); printf("Blocksize %i\n", blockSize); cudaMemcpy(h_vec3, d_vec3, N*sizeof(int), cudaMemcpyDeviceToHost); for (int i=0; i<N; i++) { if (h_vec3[i] != h_vec4[i]) { printf("Error at i 
= %i! Host = %i; Device = %i\n", i, h_vec4[i], h_vec3[i]); return 1; }; } printf("Test passed\n"); return 0; }
957f013f04c6deb3490f363ed8936e4427f44996.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#include <vector>
#include <chrono>
#include <fstream>
#include <cstdio>
#include "bitmap.cuh"

#define NOW (std::chrono::high_resolution_clock::now())
typedef std::chrono::time_point<std::chrono::high_resolution_clock> chrono_t;

// Abort the process if the most recent HIP API call or kernel launch failed.
void checkCudaError() {
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl;
        exit(1);
    }
}

// In TEST builds: print elapsed time since `checkpoint` and reset it to now.
// Otherwise just terminate the current progress line.
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
    chrono_t end = NOW;
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
    std::cout << duration.count() << "ms" << std::endl;
    checkpoint = end;
#else
    std::cout << std::endl;
#endif
}

// Milliseconds elapsed since `checkpoint`, with microsecond resolution.
double get_duration_ms(chrono_t checkpoint) {
    chrono_t end = NOW;
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - checkpoint);
    return ((double)duration.count())/1000;
}

// Pretty-print a millisecond count as H:MM:SS.mmm.
void print_ms(double t) {
    unsigned long t_int = (unsigned long)t;
    unsigned ms = t_int % 1000;
    t_int = t_int / 1000;
    unsigned s = t_int % 60;
    t_int = t_int / 60;
    unsigned min = t_int % 60;
    unsigned hour = t_int / 60;
    printf("%u:%02u:%02u.%03u", hour, min, s, ms);
}

// Slices an STL model into NUM_LAYERS bitmap layers, PPS_BLOCK_HEIGHT layers
// per kernel pass. Expects the STL path as the single command-line argument.
int main(int argc, char* argv[]) {
    std::string stl_file_name;
    std::vector<triangle> triangles;
    if (argc == 2) {
        stl_file_name = argv[1];
    } else if (argc > 2) {
        std::cout << "ERROR: Too many command line arguments" << std::endl;
    }

    chrono_t start = NOW;
    read_stl(stl_file_name,triangles);
    std::cout << "Reading STL file...           ";
    timer_checkpoint(start);
    std::cout << "Allocating device memory...   ";

    int num_triangles = triangles.size();
    triangle* triangles_dev, * triangles_selected;
    // all[z][y][x]
#ifdef TEST
    // Allocate all reauired memory
    size_t size = NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool);
#else
    // Allocation just enough memory for profiling
    size_t size = (PPS_BLOCK_HEIGHT) * Y_DIM * X_DIM * sizeof(bool);
#endif
    bool* all = (bool*)malloc(size);
    bool* all_dev;
    hipMalloc(&all_dev, (PPS_BLOCK_HEIGHT) * Y_DIM * X_DIM * sizeof(bool));
    hipMalloc(&triangles_dev, num_triangles * sizeof(triangle));
    hipMalloc(&triangles_selected, num_triangles * sizeof(triangle));
    hipMemcpy(triangles_dev, triangles.data(), num_triangles * sizeof(triangle), hipMemcpyHostToDevice);
    unsigned* out_length_d, out_length_h;   // out_length_h is a value, not a pointer
    hipMalloc(&out_length_d, sizeof(unsigned));
    hipError_t err = hipGetLastError(); // add
    if (err != hipSuccess) {
        std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl;
        return 1;
    }

    int threadsPerBlock = THREADS_PER_BLOCK;
    int blocksPerGrid;
    blocksPerGrid = ((PPS_BLOCK_HEIGHT) * X_DIM + threadsPerBlock - 1) / threadsPerBlock;
    timer_checkpoint(start);
    std::cout << "Running pps kernel...         " << std::endl;

    for (unsigned layer_idx = 0; layer_idx < NUM_LAYERS; layer_idx += (PPS_BLOCK_HEIGHT)) {
        // Progress Estimate. Guard the first iteration: layer_idx == 0
        // previously divided by zero and printed inf/nan.
        double elapsed_time = get_duration_ms(start);
        double estimate = (layer_idx == 0) ? 0.0 : elapsed_time / layer_idx * NUM_LAYERS;
        printf("Progress: %2.2f%%. Time: ", ((double)layer_idx*100)/NUM_LAYERS);
        print_ms(elapsed_time);
        printf(" / ");
        print_ms(estimate);
        printf("\n");

        hipMemset(out_length_d, 0, sizeof(unsigned));
        checkCudaError();
        // Compact the triangles that intersect this batch of layers.
        hipLaunchKernelGGL(( triangleSelect), dim3(128),dim3(128), 0, 0, triangles_dev, triangles_selected, num_triangles, out_length_d, layer_idx);
        checkCudaError();
        hipMemcpy(&out_length_h, out_length_d, sizeof(unsigned), hipMemcpyDeviceToHost);
        checkCudaError();
        hipLaunchKernelGGL(( pps), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, triangles_selected, out_length_h, all_dev, layer_idx);
        checkCudaError();

        // The final batch may be shorter than PPS_BLOCK_HEIGHT layers.
        size_t copy_size = (layer_idx + (PPS_BLOCK_HEIGHT)) < NUM_LAYERS ? (PPS_BLOCK_HEIGHT) : NUM_LAYERS - layer_idx;
        copy_size = copy_size * X_DIM * Y_DIM * sizeof(bool);
#ifdef TEST
        bool* host_addr = &all[X_DIM*Y_DIM*layer_idx];
#else
        bool* host_addr = &all[0];
#endif
        hipMemcpy(host_addr, all_dev, copy_size, hipMemcpyDeviceToHost);
        hipDeviceSynchronize();
        checkCudaError();
    }
    timer_checkpoint(start);

    hipFree(all_dev);
    hipFree(triangles_selected);
    hipFree(out_length_d);

#ifdef TEST
    // checkOutput(triangles_dev, num_triangles, all);
#if (WRITE_BMP == 1)
    Pixel black = BLACK;
    Pixel white = WHITE;
    const char outDir[] = "bmp";
    char fname[128];
    for (int z = 0; z < NUM_LAYERS; z++) {
        // snprintf instead of sprintf: fname is a fixed 128-byte buffer.
        snprintf(fname, sizeof(fname), "%s/layer_%d.bmp", outDir, z);
        std::ofstream outfile(fname, std::ios::out | std::ios::binary);
        // Write BMP header
        BmpHeader header;
        header.setDim(X_DIM, Y_DIM);
        header.setRes(RESOLUTION);
        outfile.write((char*)&header, HEADER_SIZE);
        for (int y = 0; y < Y_DIM; y++) {
            for (int x = 0; x < X_DIM; x++) {
                if (all[z*X_DIM*Y_DIM + y*X_DIM + x]) outfile.write((char*) &black, 3);
                else outfile.write((char*) &white, 3);
            }
        }
        std::cout << "Writing to output file... "<< z+1 << "/" << NUM_LAYERS << "\r";
        outfile.close();
    }
    std::cout << std::endl;
#endif
#endif
    hipFree(triangles_dev);
    free(all);
    return 0;
}
957f013f04c6deb3490f363ed8936e4427f44996.cu
#include <iostream>
#include <string>
#include "triangle.cuh"
#include "slicer.cuh"
#include "golden.cuh"
#include <vector>
#include <chrono>
#include <fstream>
#include <cstdio>
#include "bitmap.cuh"

#define NOW (std::chrono::high_resolution_clock::now())
typedef std::chrono::time_point<std::chrono::high_resolution_clock> chrono_t;

// Abort the process if the most recent CUDA API call or kernel launch failed.
void checkCudaError() {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
}

// In TEST builds: print elapsed time since `checkpoint` and reset it to now.
// Otherwise just terminate the current progress line.
void timer_checkpoint(chrono_t & checkpoint) {
#ifdef TEST
    chrono_t end = NOW;
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - checkpoint);
    std::cout << duration.count() << "ms" << std::endl;
    checkpoint = end;
#else
    std::cout << std::endl;
#endif
}

// Milliseconds elapsed since `checkpoint`, with microsecond resolution.
double get_duration_ms(chrono_t checkpoint) {
    chrono_t end = NOW;
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - checkpoint);
    return ((double)duration.count())/1000;
}

// Pretty-print a millisecond count as H:MM:SS.mmm.
void print_ms(double t) {
    unsigned long t_int = (unsigned long)t;
    unsigned ms = t_int % 1000;
    t_int = t_int / 1000;
    unsigned s = t_int % 60;
    t_int = t_int / 60;
    unsigned min = t_int % 60;
    unsigned hour = t_int / 60;
    printf("%u:%02u:%02u.%03u", hour, min, s, ms);
}

// Slices an STL model into NUM_LAYERS bitmap layers, PPS_BLOCK_HEIGHT layers
// per kernel pass. Expects the STL path as the single command-line argument.
int main(int argc, char* argv[]) {
    std::string stl_file_name;
    std::vector<triangle> triangles;
    if (argc == 2) {
        stl_file_name = argv[1];
    } else if (argc > 2) {
        std::cout << "ERROR: Too many command line arguments" << std::endl;
    }

    chrono_t start = NOW;
    read_stl(stl_file_name,triangles);
    std::cout << "Reading STL file...           ";
    timer_checkpoint(start);
    std::cout << "Allocating device memory...   ";

    int num_triangles = triangles.size();
    triangle* triangles_dev, * triangles_selected;
    // all[z][y][x]
#ifdef TEST
    // Allocate all reauired memory
    size_t size = NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool);
#else
    // Allocation just enough memory for profiling
    size_t size = (PPS_BLOCK_HEIGHT) * Y_DIM * X_DIM * sizeof(bool);
#endif
    bool* all = (bool*)malloc(size);
    bool* all_dev;
    cudaMalloc(&all_dev, (PPS_BLOCK_HEIGHT) * Y_DIM * X_DIM * sizeof(bool));
    cudaMalloc(&triangles_dev, num_triangles * sizeof(triangle));
    cudaMalloc(&triangles_selected, num_triangles * sizeof(triangle));
    cudaMemcpy(triangles_dev, triangles.data(), num_triangles * sizeof(triangle), cudaMemcpyHostToDevice);
    unsigned* out_length_d, out_length_h;   // out_length_h is a value, not a pointer
    cudaMalloc(&out_length_d, sizeof(unsigned));
    cudaError_t err = cudaGetLastError(); // add
    if (err != cudaSuccess) {
        std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }

    int threadsPerBlock = THREADS_PER_BLOCK;
    int blocksPerGrid;
    blocksPerGrid = ((PPS_BLOCK_HEIGHT) * X_DIM + threadsPerBlock - 1) / threadsPerBlock;
    timer_checkpoint(start);
    std::cout << "Running pps kernel...         " << std::endl;

    for (unsigned layer_idx = 0; layer_idx < NUM_LAYERS; layer_idx += (PPS_BLOCK_HEIGHT)) {
        // Progress Estimate. Guard the first iteration: layer_idx == 0
        // previously divided by zero and printed inf/nan.
        double elapsed_time = get_duration_ms(start);
        double estimate = (layer_idx == 0) ? 0.0 : elapsed_time / layer_idx * NUM_LAYERS;
        printf("Progress: %2.2f%%. Time: ", ((double)layer_idx*100)/NUM_LAYERS);
        print_ms(elapsed_time);
        printf(" / ");
        print_ms(estimate);
        printf("\n");

        cudaMemset(out_length_d, 0, sizeof(unsigned));
        checkCudaError();
        // Compact the triangles that intersect this batch of layers.
        triangleSelect<<<128,128>>>(triangles_dev, triangles_selected, num_triangles, out_length_d, layer_idx);
        checkCudaError();
        cudaMemcpy(&out_length_h, out_length_d, sizeof(unsigned), cudaMemcpyDeviceToHost);
        checkCudaError();
        pps<<<blocksPerGrid, threadsPerBlock>>>(triangles_selected, out_length_h, all_dev, layer_idx);
        checkCudaError();

        // The final batch may be shorter than PPS_BLOCK_HEIGHT layers.
        size_t copy_size = (layer_idx + (PPS_BLOCK_HEIGHT)) < NUM_LAYERS ? (PPS_BLOCK_HEIGHT) : NUM_LAYERS - layer_idx;
        copy_size = copy_size * X_DIM * Y_DIM * sizeof(bool);
#ifdef TEST
        bool* host_addr = &all[X_DIM*Y_DIM*layer_idx];
#else
        bool* host_addr = &all[0];
#endif
        cudaMemcpy(host_addr, all_dev, copy_size, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        checkCudaError();
    }
    timer_checkpoint(start);

    cudaFree(all_dev);
    cudaFree(triangles_selected);
    cudaFree(out_length_d);

#ifdef TEST
    // checkOutput(triangles_dev, num_triangles, all);
#if (WRITE_BMP == 1)
    Pixel black = BLACK;
    Pixel white = WHITE;
    const char outDir[] = "bmp";
    char fname[128];
    for (int z = 0; z < NUM_LAYERS; z++) {
        // snprintf instead of sprintf: fname is a fixed 128-byte buffer.
        snprintf(fname, sizeof(fname), "%s/layer_%d.bmp", outDir, z);
        std::ofstream outfile(fname, std::ios::out | std::ios::binary);
        // Write BMP header
        BmpHeader header;
        header.setDim(X_DIM, Y_DIM);
        header.setRes(RESOLUTION);
        outfile.write((char*)&header, HEADER_SIZE);
        for (int y = 0; y < Y_DIM; y++) {
            for (int x = 0; x < X_DIM; x++) {
                if (all[z*X_DIM*Y_DIM + y*X_DIM + x]) outfile.write((char*) &black, 3);
                else outfile.write((char*) &white, 3);
            }
        }
        std::cout << "Writing to output file... "<< z+1 << "/" << NUM_LAYERS << "\r";
        outfile.close();
    }
    std::cout << std::endl;
#endif
#endif
    cudaFree(triangles_dev);
    free(all);
    return 0;
}
e885824b49d0edbc02c067da200a106ca88adbe1.hip
// !!! This is a file automatically generated by hipify!!!
extern "C" {
#include <hip/hip_runtime.h>
#include "conf.h"
#include "stencil.h"
}
#include <stdio.h>

// Cycle an index through [0, MAX). Used below to rotate the north/center/south
// row slots of the 3-row shared-memory pipeline instead of moving data.
#define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1)
#define ROTATE_UP(val,MAX) ((val+1)%MAX)

/**
 * GPU Device kernel for the for 2D stencil
 * First attempt during hackaton
 * M = Rows, N = Cols INCLUDING HALOS
 * In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows
 *
 * Dynamic shared memory: sized by the host wrapper (gpu_kernel5) as
 * (1+HALO*2)*(blockDim.x+2) doubles. Each block processes a tile_y-row,
 * blockDim.x-column tile starting at (rowPos, colPos).
 * NOTE(review): no bounds checks — assumes the launch covers only interior
 * points; confirm against the caller's grid setup.
 */
__global__ void gpu_stencil2D_4pt_hack5(double * dst, double * src,int tile_y,int rowPos,int colPos, int M, int N)
{
#ifdef CUDA_DARTS_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel begin!\n");
    }
#endif
    //Declaring the shared memory array for source
    extern __shared__ double shared_mem[] ;

    //indexes
    int i,j, k,curRow;
    //Cols * numRows/Tile * tileIndex
    int base_global_row = rowPos + ( tile_y* blockIdx.y );
    int base_global_col = colPos + blockDim.x*blockIdx.x;
    int base_global_idx = N*base_global_row + base_global_col ;
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
    int t = threadIdx.x;

    //copy the shared memory to fill the pipeline
    for (i = 0 ; i < 1+HALO*2 ; i ++ ){
        k = base_global_idx+i*N+t;
        j = i*(blockDim.x+2) + t;
        shared_mem [j] =src[k];
        if(t<2){
            // threads 0 and 1 also fetch the two extra columns past the tile
            shared_mem[j+blockDim.x]=src[k+blockDim.x];
        }
//        if(blockDim.x==1){
//            shared_mem[j+blockDim.x+1]=src[k+blockDim.x+1];
//        }
    }
    __syncthreads();
    j = t+HALO;
    //Pipelined copy one row and process it
    for ( curRow = HALO; curRow < tile_y; curRow+=1 )
    {
        //Stencil computation
        //top + bottom + left + right
        dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
        __syncthreads();
        //We are copying from src to shared memory.
        // Overwrite the oldest (north) row with the row two below the current one.
        int nextRow2 = base_global_row+curRow+2;
        k = base_global_col+nextRow2*N+t;
        shared_mem [north*(blockDim.x+2)+t] =src[k];
        if(t<2){
            shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x];
        }
//        if(blockDim.x==1){
//            shared_mem[north*(blockDim.x+2)+t+blockDim.x+1]=src[k+blockDim.x+1];
//        }
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
    //last row
    dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
#ifdef CUDA_DARTS_DEBUG
    if(threadIdx.x==0 && blockIdx.x ==0 ){
        printf("tile_y= %d, last row=%d \n",tile_y, base_global_row + curRow);
        printf("blockDim.x= %d, first cols=%d \n",blockDim.x, base_global_col + threadIdx.x+1);
    }
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel finish!\n");
    }
#endif
}

// Variant of gpu_stencil2D_4pt_hack5 in which every thread loads both extra
// columns unconditionally (no t<2 guard) — presumably the blockDim.x==1 case
// suggested by the commented-out branches above; confirm with the caller
// (gpu_kernel5_thread1).
__global__ void gpu_stencil2D_4pt_hack5_thread1(double * dst, double * src,int tile_y,int rowPos,int colPos, int M, int N)
{
#ifdef CUDA_DARTS_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel begin!\n");
    }
#endif
    //Declaring the shared memory array for source
    extern __shared__ double shared_mem[] ;

    //indexes
    int i,j, k,curRow;
    //Cols * numRows/Tile * tileIndex
    int base_global_row = rowPos + ( tile_y* blockIdx.y );
    int base_global_col = colPos + blockDim.x*blockIdx.x;
    int base_global_idx = N*base_global_row + base_global_col ;
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
    int t = threadIdx.x;

    //copy the shared memory to fill the pipeline
    for (i = 0 ; i < 1+HALO*2 ; i ++ ){
        k = base_global_idx+i*N+t;
        j = i*(blockDim.x+2) + t;
        shared_mem [j] =src[k];
        shared_mem[j+blockDim.x]=src[k+blockDim.x];
        shared_mem[j+blockDim.x+1]=src[k+blockDim.x+1];
    }
    __syncthreads();
    j = t+HALO;
    //Pipelined copy one row and process it
    for ( curRow = HALO; curRow < tile_y; curRow+=1 )
    {
        //Stencil computation
        //top + bottom + left + right
        dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
        __syncthreads();
        //We are copying from src to shared memory.
        int nextRow2 = base_global_row+curRow+2;
        k = base_global_col+nextRow2*N+t;
        shared_mem [north*(blockDim.x+2)+t] =src[k];
        shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x];
        shared_mem[north*(blockDim.x+2)+t+blockDim.x+1]=src[k+blockDim.x+1];
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
    //last row
    dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
#ifdef CUDA_DARTS_DEBUG
    if(threadIdx.x==0 && blockIdx.x ==0 ){
        printf("tile_y= %d, last row=%d \n",tile_y, base_global_row + curRow);
        printf("blockDim.x= %d, first cols=%d \n",blockDim.x, base_global_col + threadIdx.x+1);
    }
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel finish!\n");
    }
#endif
}

// Earlier, superseded draft of gpu_stencil2D_4pt_hack4 (kept commented out).
//__global__ void gpu_stencil2D_4pt_hack4(double * dst, double * src, int M, int N)
//{
////    printf("kernel begin!\n");
//    //Declaring the shared memory array for source
//    extern __shared__ double shared_mem[] ;
//
//    //indexes
//    int i, j, k,curRow;
//    //Cols * numRows/Tile * tileIndex
//    int base_global_row = ( N ) * ( GRID_TILE_Y * blockIdx.y );
//    int base_global_col = blockDim.x*blockIdx.x;
//    int base_global_idx = base_global_row + base_global_col ;
//    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
//    int t = threadIdx.x;
//
//    //copy the shared memory to fill the pipeline
//    bool rowLeft = (blockIdx.y==(gridDim.y-1))&&(M%GRID_TILE_Y<3)&&(M%GRID_TILE_Y>0);
//    int numRowLeft =(rowLeft)?(3-M%GRID_TILE_Y):0;
//    bool noColsLeft = (base_global_col +t )<N;
//    bool noColsLeft2= (base_global_col+t+2)<N;
//    for (i = 0 ; i < 1+HALO*2-numRowLeft ; i ++ ){
//        k = base_global_idx+i*N+t;
//        j = i*(blockDim.x+2) + t;
//        shared_mem [j] = (noColsLeft)?src[k]:0.0;
//        if((t<2) &&(noColsLeft)){
//            shared_mem[j+blockDim.x]=src[k+blockDim.x];
//        }
//    }
//
//    __syncthreads();
//
//    int tt = (((blockIdx.y+1)*GRID_TILE_Y)>M)?(M%GRID_TILE_Y): GRID_TILE_Y;
//    int ss = (((M%GRID_TILE_Y)==1)&&(blockIdx.y ==(gridDim.y-2)))?-1:0;
//
//    int lastRow = ((blockIdx.y == (gridDim.y-1))?-1:1)+tt +ss ;
////    printf("lastRow:%d \n",lastRow );
//    //Pipelined copy one row and process it
//    for ( curRow = HALO; curRow < lastRow; curRow+=1 )
//    {
//        //Stencil computation
//        //top + bottom + left + right
//        j = threadIdx.x+HALO;
//        if(noColsLeft2){
//            dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
//        }
//        __syncthreads();
//        //We are copying from src to shared memory.
//        k=base_global_idx+(curRow+2)*N+threadIdx.x;
//        if(k<M*N){
//            shared_mem [north*(blockDim.x+2)+threadIdx.x] =(noColsLeft)? src[k]:0.0;
//        }
//        if((t<2)&&(noColsLeft)&&(k<M*N)){
//            shared_mem[north*(blockDim.x+2)+threadIdx.x+blockDim.x]=src[k+blockDim.x];
//        }
//        center = ROTATE_UP(center,3);
//        south = ROTATE_UP(south,3);
//        north = ROTATE_UP(north,3);
//        __syncthreads();
//    }
//
////    printf("kernel finish!\n");
//}

// Like hack5 but with fixed GRID_TILE_Y tiles and explicit row/column bounds
// checks (legalRow*/legalCol*), so partial tiles at the domain edge are safe;
// out-of-range source cells are read as 0.0.
__global__ void gpu_stencil2D_4pt_hack4(double * dst, double * src, int M, int N)
{
#ifdef CUDA_DARTS_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel begin!\n");
    }
#endif
    //Declaring the shared memory array for source
    extern __shared__ double shared_mem[] ;

    //indexes
    int i,j, k,curRow;
    //Cols * numRows/Tile * tileIndex
    int base_global_row = ( GRID_TILE_Y * blockIdx.y );
    int base_global_col = blockDim.x*blockIdx.x;
    int base_global_idx = N*base_global_row + base_global_col ;
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory
    int t = threadIdx.x;

    //copy the shared memory to fill the pipeline
    bool legalCol = (base_global_col +t )<N;
    bool legalCol2= (base_global_col+t+2)<N;
    bool legalColn= (base_global_col+t+blockDim.x)<N;
    for (i = 0 ; i < 1+HALO*2 ; i ++ ){
        k = base_global_idx+i*N+t;
        j = i*(blockDim.x+2) + t;
        bool legalRow = (base_global_row+i)<M;
        shared_mem [j] =legalRow?( legalCol?src[k]:0.0):0.0;
        if((t<2)&&legalColn&&legalRow){
            shared_mem[j+blockDim.x]=src[k+blockDim.x];
        }
    }
    __syncthreads();
    //Pipelined copy one row and process it
    for ( curRow = HALO; curRow < GRID_TILE_Y+1; curRow+=1 )
    {
        //Stencil computation
        //top + bottom + left + right
        j = threadIdx.x+HALO;
        bool legalRow1 =( base_global_row+curRow+1)<M;
        if((legalCol2)&&(legalRow1)){
            dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5;
        }
        __syncthreads();
        //We are copying from src to shared memory.
        int nextRow2 = base_global_row+curRow+2;
        bool legalRow2 = nextRow2<M;
        k = base_global_col+nextRow2*N+t;
        shared_mem [north*(blockDim.x+2)+t] =(legalRow2&&legalCol)?src[k]:0.0;
        if((t<2)&&legalColn&&legalRow2){
            shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x];
        }
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
#ifdef CUDA_DARTS_DEBUG
    if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){
        printf("kernel finish!\n");
    }
#endif
}

// 3-row pipelined stencil using a statically sized 2D shared array and a
// strided inner loop over columns (GRID_TILE_X may exceed blockDim.x).
// No bounds checks: assumes the grid tiles the interior exactly.
__global__ void gpu_stencil2D_4pt_hack2(double * dst, double * src, int M, int N)
{
//    printf("kernel begin!\n");
    //Declaring the shared memory array for source
    __shared__ double shared_mem[ 1 + HALO*2 ] [ GRID_TILE_X + HALO*2]; //1 is the row I am modifying
    //double * shSrc = shared_mem;

    //indexes
    int i, j, curRow;
    //Cols * numRows/Tile * tileIndex
    int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
    int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory

    //copy the shared memory to fill the pipeline
    for (i = 0 ; i < 1+HALO*2 ; i ++ )
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [i][j] = src[base_global_idx + i*N + j];
        }
    __syncthreads();
    //Pipelined copy one row and process it
    for ( curRow = HALO; curRow < GRID_TILE_Y; curRow+=1 )
    {
        //Stencil computation
        for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
        {
            //top + bottom + left + right
            dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
        }
        __syncthreads();
        //We are copying from dst to shared memory.
        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
        {
            shared_mem [north][j] = src[base_global_idx + (curRow+2)*N + j];
        }
        center = ROTATE_UP(center,3);
        south = ROTATE_UP(south,3);
        north = ROTATE_UP(north,3);
        __syncthreads();
    }
    //Dranning the pipeline
    for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
    {
        //top + bottom + left + right
        dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5;
    }
    __syncthreads();
//    printf("kernel finish!\n");
}

///**
// * GPU Device kernel for the for 2D stencil
// * First attempt during hackaton
// * M = Rows, N = Cols INCLUDING HALOS
// */
//__global__ void gpu_stencil2D_4pt_hack1(double * dst, double * src, int M, int N)
//{
//
//    //Declaring the shared memory array for source
//    __shared__ double shared_mem[GRID_TILE_Y + HALO*2 ] [ GRID_TILE_X + HALO*2];
//    //double * shSrc = shared_mem;
//
//    //indexes
//    int i, j;
//
//    //Cols * numRows/Tile * tileIndex
//    int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x;
//
//    //We are copying from dst to shared memory.
//    for (i = 0 ; i < GRID_TILE_Y+2*HALO ; i ++ )
//        for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x)
//        {
//            shared_mem [i][j] = src[base_global_idx + i*N + j];
//        }
//
//    __syncthreads();
//
//    //Stencil computation
//    for (i = HALO ; i < GRID_TILE_Y+HALO ; i ++ )
//        for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x)
//        {
//            //top + bottom + left + right
//            dst[base_global_idx + i*N + j] = (shared_mem[i-1][j] + shared_mem[i+1][j] + shared_mem[i][j-1] + shared_mem[i][j+1] )/5.5;
//        }
//
//    __syncthreads();
//}

/**
 * GPU Device kernel for the for 2D stencil
 * M = Rows, N = Cols
 *
 * Each thread handles a TILE_SIZE x TILE_SIZE sub-tile; thread (0,0) of every
 * block loads all four halo strips by itself before the block-wide barrier.
 * Dynamic shared memory holds the full (tile + halo) patch of src.
 */
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N)
{
    //Declaring the shared memory array for source
    extern __shared__ double shared_mem[];
    double * shSrc = shared_mem;

    //indexes
    int i, j;
    //neighbor's values
    double north, south, east, west;
    //SharedMem Collumns Dimension
    int smColDim = HALO*2+blockDim.y*TILE_SIZE;
    int smRowDim = HALO*2+blockDim.x*TILE_SIZE;

    //Copying to shared memory
    //Inner part
    for ( i = 0 ; i < TILE_SIZE ; i++ ) {
        for ( j = 0 ; j < TILE_SIZE ; j++ ) {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            shSrc[shMemIndex]=src[globalIndex];
        }
    }
    //Halos
    if (threadIdx.x == 0 && threadIdx.y == 0 ) {
        int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
        //For Bottom and top row
        for ( i = 0 ; i < HALO ; i++ ) {
            for ( j = 0 ; j < smColDim ; j++ ) {
                indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
                indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
                shSrc[i*smColDim+j] = src[indexTopHalo];
                shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
            }
        }
        //For right and left Columns
        for ( i = 0 ; i < HALO ; i++ ) {
            for ( j = 0 ; j < smRowDim-HALO*2; j ++ ) {
                indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
                indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
                shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
                shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
            }
        }
    }
    __syncthreads();
    for ( i = 0 ; i < TILE_SIZE ; i++ ) {
        for ( j = 0 ; j < TILE_SIZE ; j++ ) {
            int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
            int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            //Getting the neighbohrs
            north = shSrc[shMemIndex-smColDim];
            south = shSrc[shMemIndex+smColDim];
            east = shSrc[shMemIndex+1];
            west = shSrc[shMemIndex-1];
            //Real Stencil operation
            dst[globalIndex] = ( north + south + east + west )/5.5;
//            dst[globalIndex] = ( north + south + east + west )/4;
        }
    }
    __syncthreads();
}

/**
 * Nave 4pt stencil code for 2D arrays.
 * Host-side sequential reference: n_tsteps Jacobi-like sweeps over the
 * interior, ping-ponging DST/SRC via SWAP_PTR (from the project headers).
 */
void stencil2D4pt ( double* __restrict__ dst, double* __restrict__ src, const size_t n_rows, const size_t n_cols, const size_t n_tsteps )
{
    typedef double (*Array2D)[n_cols];
    volatile Array2D DST = (Array2D) dst, SRC = (Array2D) src;
    for (size_t ts = 0; ts < n_tsteps; ++ts) {
        for (size_t i = 1; i < n_rows-1; ++i) {
            for (size_t j = 1; j < n_cols-1; ++j) {
                DST[i][j] = (SRC[i-1][j] + SRC[i+1][j] + SRC[i][j-1] + SRC[i][j+1])/5.5;
            }
        }
        SWAP_PTR(&DST,&SRC);
    }
}

// Host driver: allocates device buffers, runs NUM_ITERATIONS sweeps of the
// hack2 kernel with pointer ping-pong, and copies both buffers back.
// NOTE(review): `size` is declared double but passed to hipMalloc/hipMemcpy,
// which take size_t — works via implicit conversion but should be size_t.
extern "C"
void stencil2D4pt_gpu( double * __restrict__ dst, double* __restrict__ src, const size_t M, const size_t N, const size_t NUM_ITERATIONS)//M Rows by N Columns
{
    double size = sizeof(double) * M * N;
    //device memory allocation
    double * d_dst, * d_src;
    hipMalloc( (void **) &d_dst, size);
    hipMalloc( (void **) &d_src, size);

    //dimmensions for indexes
    // TODO the -2 is to remove the borders
    dim3 dimBlock(MAX_BLOCK_DIM,MAX_BLOCK_DIM);
    int gridx = (N-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((N-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    int gridy = (M-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((M-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ;
    dim3 dimGrid(gridx,gridy);
    //Shared memory size = inside + halo
    int shMemSize=MAX_BLOCK_DIM*TILE_SIZE*MAX_BLOCK_DIM*TILE_SIZE*sizeof(double)+(HALO*MAX_BLOCK_DIM*TILE_SIZE+HALO*HALO)*4*sizeof(double);
    //Hackaton dimensions
    dim3 dimGrid_hack1((N-HALO*2)/GRID_TILE_X,(M-HALO*2)/GRID_TILE_Y);

    //Copying the device memory
    hipMemcpy(d_src, src, size, hipMemcpyHostToDevice);
    hipMemcpy(d_dst, dst, size, hipMemcpyHostToDevice);
    //printf("CUDA Stencil Code running... cycles = %d. dim = %d by %d \n",NUM_ITERATIONS,M,N);
    int time_step = NUM_ITERATIONS;
    while (time_step-- > 0) {
        //gpu_stencil2D_4pt<<<dimGrid,dimBlock,shMemSize>>>(d_dst,d_src,M,N);
        //gpu_stencil2D_4pt_hack1<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N); //JOSE Hackathon!
        //printf("before: d_src[10] = %ld",d_src[10]);
        hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack2), dim3(dimGrid_hack1),dim3(NUM_THREADS), 0, 0, d_dst,d_src,M,N);
        //Inline swapping.
        //printf("after: d_src[10] = %ld",d_src[10]);
        // Swap on all iterations except the final one of an odd-count run,
        // so d_dst ends up holding the newest data.
        double * temp;
        if ( NUM_ITERATIONS%2 ==0 || time_step !=0) {
            temp=d_src;
            d_src=d_dst;
            d_dst=temp;
        }
    }
    //Copying memory back from device to DRAM
    //hipMemcpy(src, d_src, size, hipMemcpyDeviceToHost);
    hipMemcpy(dst, d_dst, size, hipMemcpyDeviceToHost);
    hipMemcpy(src, d_src, size, hipMemcpyDeviceToHost);
    //Free device memory
    hipFree(d_src);
    hipFree(d_dst);
}

//void*
//stencil_run(void* arg)
//{
//    stencil_t* stencil = (stencil_t*)arg;
//    STENCIL_COMPUTE(stencil->stencil,stencil->arg);
//    return NULL;
//}

// Async launch of the hack5 kernel on `stream`; dynamic shared memory is
// (1+HALO*2)*(blockDim.x+2) doubles per block. Returns before completion.
void gpu_kernel5(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src,int tile_y,int rowPos,int colPos, int M, int N,hipStream_t &stream){
    int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2);
#ifdef CUDA_DARTS_DEBUG
    printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, sharedMemSize*dimGrid.x*dimGrid.y/1024);
#endif
    hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack5), dim3(dimGrid),dim3(dimBlock),sharedMemSize,stream, d_dst,d_src,tile_y,rowPos,colPos,M,N);
#ifdef CUDA_DARTS_DEBUG
    printf("gpu kernel return to host, but kernel haven't finished!\n");
#endif
}

// Same as gpu_kernel5 but launches the *_thread1 kernel variant.
void gpu_kernel5_thread1(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src,int tile_y,int rowPos,int colPos, int M, int N,hipStream_t &stream){
    int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2);
#ifdef CUDA_DARTS_DEBUG
    printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, sharedMemSize*dimGrid.x*dimGrid.y/1024);
#endif
    hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack5_thread1), dim3(dimGrid),dim3(dimBlock),sharedMemSize,stream, d_dst,d_src,tile_y,rowPos,colPos,M,N);
#ifdef CUDA_DARTS_DEBUG
    printf("gpu kernel return to host, but kernel haven't finished!\n");
#endif
}

// Launch the bounds-checked hack4 kernel on the default stream.
void gpu_kernel4(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src, int M, int N){
    int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2);
#ifdef CUDA_DARTS_DEBUG
    printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, sharedMemSize*dimGrid.x*dimGrid.y/1024);
#endif
    hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack4), dim3(dimGrid),dim3(dimBlock),sharedMemSize, 0, d_dst,d_src,M,N);
#ifdef CUDA_DARTS_DEBUG
    printf("gpu kernel return to host, but kernel haven't finished!\n");
#endif
}

// Launch one hack2 sweep on the default stream.
void gpu_kernel1(dim3 dimGrid_hack1,double * d_dst, double * d_src, int M, int N){
    hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack2), dim3(dimGrid_hack1),dim3(NUM_THREADS), 0, 0, d_dst,d_src,M,N);
}

// Launch one hack2 sweep on the given stream.
void gpu_kernel3(hipStream_t &stream,dim3 dimGrid_hack1,double * d_dst, double * d_src, int M, int N){
    hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack2), dim3(dimGrid_hack1),dim3(NUM_THREADS),0,stream, d_dst,d_src,M,N);
}

// Run ts-1 hack2 sweeps with device-pointer ping-pong (note: pre-decrement
// means a call with ts sweeps executes ts-1 launches; host dst/src/size are
// unused here).
void gpu_kernel2(dim3 dimGrid_hack1,double *dst, double *src, double size, size_t ts, double * d_dst, double * d_src, int M, int N){
    double * tmp;
    while (--ts!=0){
        printf("ts:%ld \n", ts);
        hipLaunchKernelGGL(( gpu_stencil2D_4pt_hack2), dim3(dimGrid_hack1),dim3(NUM_THREADS), 0, 0, d_dst,d_src,M,N);
        tmp = d_src;
        d_src = d_dst;
        d_dst=tmp;
    }
}
e885824b49d0edbc02c067da200a106ca88adbe1.cu
extern "C" { #include <cuda.h> #include "conf.h" #include "stencil.h" } #include <stdio.h> #define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1) #define ROTATE_UP(val,MAX) ((val+1)%MAX) /** * GPU Device kernel for the for 2D stencil * First attempt during hackaton * M = Rows, N = Cols INCLUDING HALOS * In this version now we replace the size of the shared memory to be just 3 rows (actually 1+HALO*2) rows */ __global__ void gpu_stencil2D_4pt_hack5(double * dst, double * src,int tile_y,int rowPos,int colPos, int M, int N) { #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel begin!\n"); } #endif //Declaring the shared memory array for source extern __shared__ double shared_mem[] ; //indexes int i,j, k,curRow; //Cols * numRows/Tile * tileIndex int base_global_row = rowPos + ( tile_y* blockIdx.y ); int base_global_col = colPos + blockDim.x*blockIdx.x; int base_global_idx = N*base_global_row + base_global_col ; int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory int t = threadIdx.x; //copy the shared memory to fill the pipeline for (i = 0 ; i < 1+HALO*2 ; i ++ ){ k = base_global_idx+i*N+t; j = i*(blockDim.x+2) + t; shared_mem [j] =src[k]; if(t<2){ shared_mem[j+blockDim.x]=src[k+blockDim.x]; } // if(blockDim.x==1){ // shared_mem[j+blockDim.x+1]=src[k+blockDim.x+1]; // } } __syncthreads(); j = t+HALO; //Pipelined copy one row and process it for ( curRow = HALO; curRow < tile_y; curRow+=1 ) { //Stencil computation //top + bottom + left + right dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; __syncthreads(); //We are copying from src to shared memory. 
int nextRow2 = base_global_row+curRow+2; k = base_global_col+nextRow2*N+t; shared_mem [north*(blockDim.x+2)+t] =src[k]; if(t<2){ shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x]; } // if(blockDim.x==1){ // shared_mem[north*(blockDim.x+2)+t+blockDim.x+1]=src[k+blockDim.x+1]; // } center = ROTATE_UP(center,3); south = ROTATE_UP(south,3); north = ROTATE_UP(north,3); __syncthreads(); } //last row dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; #ifdef CUDA_DARTS_DEBUG if(threadIdx.x==0 && blockIdx.x ==0 ){ printf("tile_y= %d, last row=%d \n",tile_y, base_global_row + curRow); printf("blockDim.x= %d, first cols=%d \n",blockDim.x, base_global_col + threadIdx.x+1); } if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel finish!\n"); } #endif } __global__ void gpu_stencil2D_4pt_hack5_thread1(double * dst, double * src,int tile_y,int rowPos,int colPos, int M, int N) { #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel begin!\n"); } #endif //Declaring the shared memory array for source extern __shared__ double shared_mem[] ; //indexes int i,j, k,curRow; //Cols * numRows/Tile * tileIndex int base_global_row = rowPos + ( tile_y* blockIdx.y ); int base_global_col = colPos + blockDim.x*blockIdx.x; int base_global_idx = N*base_global_row + base_global_col ; int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory int t = threadIdx.x; //copy the shared memory to fill the pipeline for (i = 0 ; i < 1+HALO*2 ; i ++ ){ k = base_global_idx+i*N+t; j = i*(blockDim.x+2) + t; shared_mem [j] =src[k]; shared_mem[j+blockDim.x]=src[k+blockDim.x]; shared_mem[j+blockDim.x+1]=src[k+blockDim.x+1]; } __syncthreads(); j = t+HALO; //Pipelined copy one row and process it for ( curRow = HALO; curRow < tile_y; curRow+=1 ) { //Stencil computation 
//top + bottom + left + right dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; __syncthreads(); //We are copying from src to shared memory. int nextRow2 = base_global_row+curRow+2; k = base_global_col+nextRow2*N+t; shared_mem [north*(blockDim.x+2)+t] =src[k]; shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x]; shared_mem[north*(blockDim.x+2)+t+blockDim.x+1]=src[k+blockDim.x+1]; center = ROTATE_UP(center,3); south = ROTATE_UP(south,3); north = ROTATE_UP(north,3); __syncthreads(); } //last row dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; #ifdef CUDA_DARTS_DEBUG if(threadIdx.x==0 && blockIdx.x ==0 ){ printf("tile_y= %d, last row=%d \n",tile_y, base_global_row + curRow); printf("blockDim.x= %d, first cols=%d \n",blockDim.x, base_global_col + threadIdx.x+1); } if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel finish!\n"); } #endif } //__global__ void gpu_stencil2D_4pt_hack4(double * dst, double * src, int M, int N) //{ //// printf("kernel begin!\n"); // //Declaring the shared memory array for source // extern __shared__ double shared_mem[] ; // // //indexes // int i, j, k,curRow; // //Cols * numRows/Tile * tileIndex // int base_global_row = ( N ) * ( GRID_TILE_Y * blockIdx.y ); // int base_global_col = blockDim.x*blockIdx.x; // int base_global_idx = base_global_row + base_global_col ; // int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory // int t = threadIdx.x; // // //copy the shared memory to fill the pipeline // bool rowLeft = (blockIdx.y==(gridDim.y-1))&&(M%GRID_TILE_Y<3)&&(M%GRID_TILE_Y>0); // int numRowLeft =(rowLeft)?(3-M%GRID_TILE_Y):0; // bool noColsLeft = (base_global_col +t )<N; // bool 
noColsLeft2= (base_global_col+t+2)<N; // for (i = 0 ; i < 1+HALO*2-numRowLeft ; i ++ ){ // k = base_global_idx+i*N+t; // j = i*(blockDim.x+2) + t; // shared_mem [j] = (noColsLeft)?src[k]:0.0; // if((t<2) &&(noColsLeft)){ // shared_mem[j+blockDim.x]=src[k+blockDim.x]; // } // } // // __syncthreads(); // // int tt = (((blockIdx.y+1)*GRID_TILE_Y)>M)?(M%GRID_TILE_Y): GRID_TILE_Y; // int ss = (((M%GRID_TILE_Y)==1)&&(blockIdx.y ==(gridDim.y-2)))?-1:0; // // int lastRow = ((blockIdx.y == (gridDim.y-1))?-1:1)+tt +ss ; //// printf("lastRow:%d \n",lastRow ); // //Pipelined copy one row and process it // for ( curRow = HALO; curRow < lastRow; curRow+=1 ) // { // //Stencil computation // //top + bottom + left + right // j = threadIdx.x+HALO; // if(noColsLeft2){ // dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; // } // __syncthreads(); // //We are copying from src to shared memory. // k=base_global_idx+(curRow+2)*N+threadIdx.x; // if(k<M*N){ // shared_mem [north*(blockDim.x+2)+threadIdx.x] =(noColsLeft)? 
src[k]:0.0; // } // if((t<2)&&(noColsLeft)&&(k<M*N)){ // shared_mem[north*(blockDim.x+2)+threadIdx.x+blockDim.x]=src[k+blockDim.x]; // } // center = ROTATE_UP(center,3); // south = ROTATE_UP(south,3); // north = ROTATE_UP(north,3); // __syncthreads(); // } // //// printf("kernel finish!\n"); //} __global__ void gpu_stencil2D_4pt_hack4(double * dst, double * src, int M, int N) { #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel begin!\n"); } #endif //Declaring the shared memory array for source extern __shared__ double shared_mem[] ; //indexes int i,j, k,curRow; //Cols * numRows/Tile * tileIndex int base_global_row = ( GRID_TILE_Y * blockIdx.y ); int base_global_col = blockDim.x*blockIdx.x; int base_global_idx = N*base_global_row + base_global_col ; int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory int t = threadIdx.x; //copy the shared memory to fill the pipeline bool legalCol = (base_global_col +t )<N; bool legalCol2= (base_global_col+t+2)<N; bool legalColn= (base_global_col+t+blockDim.x)<N; for (i = 0 ; i < 1+HALO*2 ; i ++ ){ k = base_global_idx+i*N+t; j = i*(blockDim.x+2) + t; bool legalRow = (base_global_row+i)<M; shared_mem [j] =legalRow?( legalCol?src[k]:0.0):0.0; if((t<2)&&legalColn&&legalRow){ shared_mem[j+blockDim.x]=src[k+blockDim.x]; } } __syncthreads(); //Pipelined copy one row and process it for ( curRow = HALO; curRow < GRID_TILE_Y+1; curRow+=1 ) { //Stencil computation //top + bottom + left + right j = threadIdx.x+HALO; bool legalRow1 =( base_global_row+curRow+1)<M; if((legalCol2)&&(legalRow1)){ dst[base_global_idx + curRow*N + j] =(shared_mem[north*(blockDim.x+2)+j] + shared_mem[south*(blockDim.x+2)+j] + shared_mem[center*(blockDim.x+2)+j-1] + shared_mem[center*(blockDim.x+2)+j+1] )/5.5; } __syncthreads(); //We are copying from src to shared memory. 
int nextRow2 = base_global_row+curRow+2; bool legalRow2 = nextRow2<M; k = base_global_col+nextRow2*N+t; shared_mem [north*(blockDim.x+2)+t] =(legalRow2&&legalCol)?src[k]:0.0; if((t<2)&&legalColn&&legalRow2){ shared_mem[north*(blockDim.x+2)+t+blockDim.x]=src[k+blockDim.x]; } center = ROTATE_UP(center,3); south = ROTATE_UP(south,3); north = ROTATE_UP(north,3); __syncthreads(); } #ifdef CUDA_DARTS_DEBUG if((blockIdx.x==0)&&(blockIdx.y==0)&&(threadIdx.x==0)){ printf("kernel finish!\n"); } #endif } __global__ void gpu_stencil2D_4pt_hack2(double * dst, double * src, int M, int N) { // printf("kernel begin!\n"); //Declaring the shared memory array for source __shared__ double shared_mem[ 1 + HALO*2 ] [ GRID_TILE_X + HALO*2]; //1 is the row I am modifying //double * shSrc = shared_mem; //indexes int i, j, curRow; //Cols * numRows/Tile * tileIndex int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x; int center = 1,north = 0,south = 2; //indexes for the current location in the shared memory //copy the shared memory to fill the pipeline for (i = 0 ; i < 1+HALO*2 ; i ++ ) for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x) { shared_mem [i][j] = src[base_global_idx + i*N + j]; } __syncthreads(); //Pipelined copy one row and process it for ( curRow = HALO; curRow < GRID_TILE_Y; curRow+=1 ) { //Stencil computation for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x) { //top + bottom + left + right dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5; } __syncthreads(); //We are copying from dst to shared memory. 
for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x) { shared_mem [north][j] = src[base_global_idx + (curRow+2)*N + j]; } center = ROTATE_UP(center,3); south = ROTATE_UP(south,3); north = ROTATE_UP(north,3); __syncthreads(); } //Dranning the pipeline for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x) { //top + bottom + left + right dst[base_global_idx + curRow*N + j] = (shared_mem[north][j] + shared_mem[south][j] + shared_mem[center][j-1] + shared_mem[center][j+1] )/5.5; } __syncthreads(); // printf("kernel finish!\n"); } ///** // * GPU Device kernel for the for 2D stencil // * First attempt during hackaton // * M = Rows, N = Cols INCLUDING HALOS // */ //__global__ void gpu_stencil2D_4pt_hack1(double * dst, double * src, int M, int N) //{ // // //Declaring the shared memory array for source // __shared__ double shared_mem[GRID_TILE_Y + HALO*2 ] [ GRID_TILE_X + HALO*2]; // //double * shSrc = shared_mem; // // //indexes // int i, j; // // //Cols * numRows/Tile * tileIndex // int base_global_idx = ( N ) * ( GRID_TILE_Y * blockIdx.y ) + GRID_TILE_X*blockIdx.x; // // //We are copying from dst to shared memory. 
// for (i = 0 ; i < GRID_TILE_Y+2*HALO ; i ++ ) // for (j = threadIdx.x ; j < GRID_TILE_X+2*HALO ; j+=blockDim.x) // { // shared_mem [i][j] = src[base_global_idx + i*N + j]; // } // // __syncthreads(); // // //Stencil computation // for (i = HALO ; i < GRID_TILE_Y+HALO ; i ++ ) // for (j = threadIdx.x + HALO ; j < GRID_TILE_X+HALO ; j+=blockDim.x) // { // //top + bottom + left + right // dst[base_global_idx + i*N + j] = (shared_mem[i-1][j] + shared_mem[i+1][j] + shared_mem[i][j-1] + shared_mem[i][j+1] )/5.5; // } // // __syncthreads(); //} /** * GPU Device kernel for the for 2D stencil * M = Rows, N = Cols */ __global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N) { //Declaring the shared memory array for source extern __shared__ double shared_mem[]; double * shSrc = shared_mem; //indexes int i, j; //neighbor's values double north, south, east, west; //SharedMem Collumns Dimension int smColDim = HALO*2+blockDim.y*TILE_SIZE; int smRowDim = HALO*2+blockDim.x*TILE_SIZE; //Copying to shared memory //Inner part for ( i = 0 ; i < TILE_SIZE ; i++ ) { for ( j = 0 ; j < TILE_SIZE ; j++ ) { int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO; int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j; shSrc[shMemIndex]=src[globalIndex]; } } //Halos if (threadIdx.x == 0 && threadIdx.y == 0 ) { int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo; //For Bottom and top row for ( i = 0 ; i < HALO ; i++ ) { for ( j = 0 ; j < smColDim ; j++ ) { indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j; indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j; shSrc[i*smColDim+j] = src[indexTopHalo]; shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo]; } } //For right and left Columns for ( i = 0 ; i < HALO ; i++ ) { 
for ( j = 0 ; j < smRowDim-HALO*2; j ++ ) { indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i; indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i; shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo]; shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo]; } } } __syncthreads(); for ( i = 0 ; i < TILE_SIZE ; i++ ) { for ( j = 0 ; j < TILE_SIZE ; j++ ) { int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO; int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j; //Getting the neighbohrs north = shSrc[shMemIndex-smColDim]; south = shSrc[shMemIndex+smColDim]; east = shSrc[shMemIndex+1]; west = shSrc[shMemIndex-1]; //Real Stencil operation dst[globalIndex] = ( north + south + east + west )/5.5; // dst[globalIndex] = ( north + south + east + west )/4; } } __syncthreads(); } /** * Naïve 4pt stencil code for 2D arrays. 
*/ void stencil2D4pt ( double* __restrict__ dst, double* __restrict__ src, const size_t n_rows, const size_t n_cols, const size_t n_tsteps ) { typedef double (*Array2D)[n_cols]; volatile Array2D DST = (Array2D) dst, SRC = (Array2D) src; for (size_t ts = 0; ts < n_tsteps; ++ts) { for (size_t i = 1; i < n_rows-1; ++i) { for (size_t j = 1; j < n_cols-1; ++j) { DST[i][j] = (SRC[i-1][j] + SRC[i+1][j] + SRC[i][j-1] + SRC[i][j+1])/5.5; } } SWAP_PTR(&DST,&SRC); } } extern "C" void stencil2D4pt_gpu( double * __restrict__ dst, double* __restrict__ src, const size_t M, const size_t N, const size_t NUM_ITERATIONS)//M Rows by N Columns { double size = sizeof(double) * M * N; //device memory allocation double * d_dst, * d_src; cudaMalloc( (void **) &d_dst, size); cudaMalloc( (void **) &d_src, size); //dimmensions for indexes // TODO the -2 is to remove the borders dim3 dimBlock(MAX_BLOCK_DIM,MAX_BLOCK_DIM); int gridx = (N-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((N-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ; int gridy = (M-2)/(MAX_BLOCK_DIM*TILE_SIZE) + (((M-2)%(MAX_BLOCK_DIM*TILE_SIZE) == 0)? 0:1 ) ; dim3 dimGrid(gridx,gridy); //Shared memory size = inside + halo int shMemSize=MAX_BLOCK_DIM*TILE_SIZE*MAX_BLOCK_DIM*TILE_SIZE*sizeof(double)+(HALO*MAX_BLOCK_DIM*TILE_SIZE+HALO*HALO)*4*sizeof(double); //Hackaton dimensions dim3 dimGrid_hack1((N-HALO*2)/GRID_TILE_X,(M-HALO*2)/GRID_TILE_Y); //Copying the device memory cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice); cudaMemcpy(d_dst, dst, size, cudaMemcpyHostToDevice); //printf("CUDA Stencil Code running... cycles = %d. dim = %d by %d \n",NUM_ITERATIONS,M,N); int time_step = NUM_ITERATIONS; while (time_step-- > 0) { //gpu_stencil2D_4pt<<<dimGrid,dimBlock,shMemSize>>>(d_dst,d_src,M,N); //gpu_stencil2D_4pt_hack1<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N); //JOSE Hackathon! //printf("before: d_src[10] = %ld",d_src[10]); gpu_stencil2D_4pt_hack2<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N); //Inline swapping. 
//printf("after: d_src[10] = %ld",d_src[10]); double * temp; if ( NUM_ITERATIONS%2 ==0 || time_step !=0) { temp=d_src; d_src=d_dst; d_dst=temp; } } //Copying memory back from device to DRAM //cudaMemcpy(src, d_src, size, cudaMemcpyDeviceToHost); cudaMemcpy(dst, d_dst, size, cudaMemcpyDeviceToHost); cudaMemcpy(src, d_src, size, cudaMemcpyDeviceToHost); //Free device memory cudaFree(d_src); cudaFree(d_dst); } //void* //stencil_run(void* arg) //{ // stencil_t* stencil = (stencil_t*)arg; // STENCIL_COMPUTE(stencil->stencil,stencil->arg); // return NULL; //} void gpu_kernel5(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src,int tile_y,int rowPos,int colPos, int M, int N,cudaStream_t &stream){ int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2); #ifdef CUDA_DARTS_DEBUG printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, sharedMemSize*dimGrid.x*dimGrid.y/1024); #endif gpu_stencil2D_4pt_hack5<<<dimGrid,dimBlock,sharedMemSize,stream>>>(d_dst,d_src,tile_y,rowPos,colPos,M,N); #ifdef CUDA_DARTS_DEBUG printf("gpu kernel return to host, but kernel haven't finished!\n"); #endif } void gpu_kernel5_thread1(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src,int tile_y,int rowPos,int colPos, int M, int N,cudaStream_t &stream){ int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2); #ifdef CUDA_DARTS_DEBUG printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, sharedMemSize*dimGrid.x*dimGrid.y/1024); #endif gpu_stencil2D_4pt_hack5_thread1<<<dimGrid,dimBlock,sharedMemSize,stream>>>(d_dst,d_src,tile_y,rowPos,colPos,M,N); #ifdef CUDA_DARTS_DEBUG printf("gpu kernel return to host, but kernel haven't finished!\n"); #endif } void gpu_kernel4(dim3 dimGrid,dim3 dimBlock,double * d_dst, double * d_src, int M, int N){ int sharedMemSize = sizeof(double)*(1+HALO*2)*(dimBlock.x+2); #ifdef CUDA_DARTS_DEBUG printf("sharedMemSize: %dKB, total sharedMemSize: %dKB\n",sharedMemSize/1024, 
sharedMemSize*dimGrid.x*dimGrid.y/1024); #endif gpu_stencil2D_4pt_hack4<<<dimGrid,dimBlock,sharedMemSize>>>(d_dst,d_src,M,N); #ifdef CUDA_DARTS_DEBUG printf("gpu kernel return to host, but kernel haven't finished!\n"); #endif } void gpu_kernel1(dim3 dimGrid_hack1,double * d_dst, double * d_src, int M, int N){ gpu_stencil2D_4pt_hack2<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N); } void gpu_kernel3(cudaStream_t &stream,dim3 dimGrid_hack1,double * d_dst, double * d_src, int M, int N){ gpu_stencil2D_4pt_hack2<<<dimGrid_hack1,NUM_THREADS,0,stream>>>(d_dst,d_src,M,N); } void gpu_kernel2(dim3 dimGrid_hack1,double *dst, double *src, double size, size_t ts, double * d_dst, double * d_src, int M, int N){ double * tmp; while (--ts!=0){ printf("ts:%ld \n", ts); gpu_stencil2D_4pt_hack2<<<dimGrid_hack1,NUM_THREADS>>>(d_dst,d_src,M,N); tmp = d_src; d_src = d_dst; d_dst=tmp; } }
85b169117842a2b2397e672940541b3124385142.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include "unistd.h" #include "time.h" #include "string.h" #include <hip/hip_runtime.h> // ---------------------- Optimised Dedispersion Loop ------------------------------ __global__ void testAtomicCas(int *buffer, int nsamp, int factor) { if (blockIdx.x * blockDim.x + threadIdx.x > nsamp * factor) return; // Get memory index int ind = (blockIdx.x * blockDim.x + threadIdx.x) / factor; // Perform atomic CAS while (atomicCAS(buffer + ind, 0 , 1) == 0); } // -------------------------- Main Program ----------------------------------- int nsamp = 1024, blocksize = 128, factor = 32; // Process command-line parameters void process_arguments(int argc, char *argv[]) { int i = 1; while((fopen(argv[i], "r")) != NULL) i++; while(i < argc) { if (!strcmp(argv[i], "-nsamp")) nsamp = atoi(argv[++i]); else if (!strcmp(argv[i], "-blocksize")) blocksize = atoi(argv[++i]); else if (!strcmp(argv[i], "-factor")) factor = atoi(argv[++i]); i++; } } int main(int argc, char *argv[]) { int *buffer, *d_buffer; int i; process_arguments(argc, argv); // Allocate and initialise arrays buffer = (int *) malloc(nsamp * sizeof(int)); // Initialise CUDA stuff hipSetDevice(0); hipEvent_t event_start, event_stop; float timestamp; hipEventCreate(&event_start); hipEventCreate(&event_stop); printf("nsamp: %d, blocksize: %d, factor: %d\n", nsamp, blocksize, factor); // Allocate CUDA memory hipMalloc((void **) &d_buffer, nsamp * sizeof(int)); hipMemset(d_buffer, 0, nsamp * sizeof(int)); time_t start = time(NULL); // Launch GPU kernel dim3 gridDim(factor * nsamp / blocksize, 1); hipEventRecord(event_start, 0); hipLaunchKernelGGL(( testAtomicCas), dim3(gridDim), dim3(blocksize), 0, 0, d_buffer, nsamp, factor); hipEventRecord(event_stop, 0); hipEventSynchronize(event_stop); hipEventElapsedTime(&timestamp, event_start, event_stop); printf("Processed in: %lf\n", timestamp); // Copy output from GPU 
hipEventRecord(event_start, 0); hipMemcpy(buffer, d_buffer, nsamp * sizeof(int), hipMemcpyDeviceToHost); hipEventRecord(event_stop, 0); hipEventSynchronize(event_stop); hipEventElapsedTime(&timestamp, event_start, event_stop); printf("Copied from GPU in: %lf\n", timestamp); // Check values for(i = 0; i < nsamp; i++) if (buffer[i] != 1) printf("buffer[%d] = %d\n", i, buffer[i]); printf("Total time: %d\n", (int) (time(NULL) - start)); }
85b169117842a2b2397e672940541b3124385142.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include "unistd.h" #include "time.h" #include "string.h" #include <cuda_runtime.h> // ---------------------- Optimised Dedispersion Loop ------------------------------ __global__ void testAtomicCas(int *buffer, int nsamp, int factor) { if (blockIdx.x * blockDim.x + threadIdx.x > nsamp * factor) return; // Get memory index int ind = (blockIdx.x * blockDim.x + threadIdx.x) / factor; // Perform atomic CAS while (atomicCAS(buffer + ind, 0 , 1) == 0); } // -------------------------- Main Program ----------------------------------- int nsamp = 1024, blocksize = 128, factor = 32; // Process command-line parameters void process_arguments(int argc, char *argv[]) { int i = 1; while((fopen(argv[i], "r")) != NULL) i++; while(i < argc) { if (!strcmp(argv[i], "-nsamp")) nsamp = atoi(argv[++i]); else if (!strcmp(argv[i], "-blocksize")) blocksize = atoi(argv[++i]); else if (!strcmp(argv[i], "-factor")) factor = atoi(argv[++i]); i++; } } int main(int argc, char *argv[]) { int *buffer, *d_buffer; int i; process_arguments(argc, argv); // Allocate and initialise arrays buffer = (int *) malloc(nsamp * sizeof(int)); // Initialise CUDA stuff cudaSetDevice(0); cudaEvent_t event_start, event_stop; float timestamp; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); printf("nsamp: %d, blocksize: %d, factor: %d\n", nsamp, blocksize, factor); // Allocate CUDA memory cudaMalloc((void **) &d_buffer, nsamp * sizeof(int)); cudaMemset(d_buffer, 0, nsamp * sizeof(int)); time_t start = time(NULL); // Launch GPU kernel dim3 gridDim(factor * nsamp / blocksize, 1); cudaEventRecord(event_start, 0); testAtomicCas<<<gridDim, blocksize>>>(d_buffer, nsamp, factor); cudaEventRecord(event_stop, 0); cudaEventSynchronize(event_stop); cudaEventElapsedTime(&timestamp, event_start, event_stop); printf("Processed in: %lf\n", timestamp); // Copy output from GPU cudaEventRecord(event_start, 0); cudaMemcpy(buffer, d_buffer, nsamp * sizeof(int), 
cudaMemcpyDeviceToHost); cudaEventRecord(event_stop, 0); cudaEventSynchronize(event_stop); cudaEventElapsedTime(&timestamp, event_start, event_stop); printf("Copied from GPU in: %lf\n", timestamp); // Check values for(i = 0; i < nsamp; i++) if (buffer[i] != 1) printf("buffer[%d] = %d\n", i, buffer[i]); printf("Total time: %d\n", (int) (time(NULL) - start)); }
65ad8685f86550b3c6b9ec41c5e6fb8c412a40eb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <hip/hip_runtime_api.h> /****************************************************************************** The variable names and the function names of this program is same as provided by the university. The added variable and function are the only changes made to this program. Compile with: nvcc -o cudaip cudaip.cu -lglut -lGL ./cudaip ******************************************************************************/ #define width 100 #define height 72 unsigned char results[width * height]; unsigned char image[] = {255,255,255,255,0,0,255,255,255,255,255,255,255,0,255,255,0,0, 255,0,0,255,255,255,255,0,0,0,0,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 0,0,255,0,0,255,255,0,255,255,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,255, 255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 255,255,0,0,255,255,255,0,0,255,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0, 0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,0,0,255,255,255,0,0,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,0,0,255,255,255,0,0,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,0,255,255,255,255,255, 0,255,255,255,255,0,0,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,0,0,255,255,0,0,0,0,0,0, 0,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,0, 0,0,0,255,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 0,0,255,255,255,0,0,255,255,255,255,0,0,255,255,0,0,0,0, 
0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255,0,0,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255, 255,255,255,255,255,255,255,0,0,255,0,0,255,255,255,255,0,0,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255, 255,0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255, 255,255,0,0,0,255,0,0,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,0,0,255,255,255,255,255,0,0,255,0,0,255,255,255,255,255,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,0,0, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255, 0,0,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0, 255,255,255,255,255,255,0,255,255,0,0,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,0,0,255,255,255,255,0, 0,255,255,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,0,0,255,255,255,255,255,0,0,255,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,0, 0,255,255,255,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,255,255,0,0,255,255,255,255,0,0,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 255,255,255,255,0,0,255,255,0,0,0,255,255,255,255,0,0,255,255, 255,255,255,255,255,255,0,255,255,255,0,0,255,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255, 255,0,0,255,255,255,255,255,255,255,255,0,0,0,255,0,0,255,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,0,0, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,0,0,0,255, 255,255,255,0,0,255,255,0,0,255,0,0,0,0,255,255,255,0,0, 255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,0,0,255,255,0,0,255,0,0,0,0, 255,255,0,0,255,255,255,255,0,0,255,255,0,0,0,0,0,255,255, 255,255,255,255,0,0,255,255,255,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,0,0, 0,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,0, 0,0,255,255,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 
255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0, 0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255, 255,255,255,255,0,0,0,0,0,255,255,0,0,255,255,0,0,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255, 255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 0,0,255,255,255,0,0,0,255,255,255,255,255,255,255,0,0,255,255, 255,255,255,0,0,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255,255, 0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255, 255,0,255,255,255,0,0,0,0,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,0,0,255,255,0,0,255,255,0,0,255,0,255,255,0,0,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255, 255,255,255,0,0,255,255,255,0,0,255,255,0,255,255,0,255,255,0, 0,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,0,0, 0,0,255,255,255,0,255,255,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255, 0,0,255,0,0,0,0,255,255,255,0,0,255,255,255,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,0,0,255,255,255,0,0,0,255,0, 0,255,255,255,255,0,0,255,255,0,0,0,255,255,255,255,0,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,0,0,255,255,255,0,0,255,255,255, 0,0,0,0,0,0,255,255,255,255,255,0,0,255,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255,255, 0,0,255,255,255,255,0,0,0,0,0,0,255,255,255,255,0,0,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,255, 255,0,0,0,0,0,0,0,0,255,255,0,0,255,255,0,0,0,0, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,255,255,0,0,0,0,255,255,0,0,0,255,255,0,0, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,0,0,0,255,255,0,0,0,255,255,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,0,0,0,0,0,255,0,0,255,255,255,0,0,255,255,255,255,0, 0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,0,0,255,255,255,0,0,0,0,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 
0,255,255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,0,0, 255,255,255,255,0,0,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,0,0,255,255,0,0,255,255,0,0,0,0, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,0,0, 255,255,255,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,0,0,255, 255,0,0,255,0,0,255,255,0,0,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255, 0,0,255,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,255,0, 0,255,255,0,0,255,0,0,255,255,0,255,255,0,0,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255, 255,255,0,0,255,255,0,0,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,0,0,255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255, 0,0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,255,255,255,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255, 0,0,255,255,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255, 255,255,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 
0,255,255,255,255,255,0,0,255,0,0,0,0,0,255,255,0,0,255, 0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,0,255,0,0, 255,255,0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0, 255,255,255,255,0,0,255,255,0,0,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,255,0,0,255,255,255,255,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,0,0,255,255,255,0,0,0,0,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,0,0,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,255,255,0, 0,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255, 255,0,0,255,0,0,255,255,0,255,255,255,255,255,255,255,255,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,0,0,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,255, 255,255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255, 0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,0,0, 0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 255,255,255,255,255,0,0,255,255,0,0,255,255,255,0,0,255,255,255, 
255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,0,0,255,255, 0,0,255,255,255,255,0,0,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,255, 255,255,0,0,255,255,255,0,0,0,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0, 0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,0,0,0,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,0,0, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,0,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,0,0,255,255,255,0,0,0,255,255,255,255, 255,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,255,255,255,255,0,255,255,255,255,0, 0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,0,255,255,255,255,0, 255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,0,0,0,255,255,255,255,0,0, 255,255,255,255,0,0,255,255,255,0,255,255,255,255,255,255,255,0,255, 0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255, 255,255,255,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,0,255,0,0,255,0,0,0,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255,255, 255,255,0,0,255,255,255,255,0,0,0,255,255,255,255,0,255,255,255, 255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,0,255,255,255, 255,0,0,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255 }; __global__ void detect_edges(unsigned char *in, unsigned char *out) { int i = (blockIdx.x * 72) + threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / width; x = i - (width * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int 
x, int y) { switch(key){ case 27: tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { unsigned char *d_results; unsigned char *d_image; hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height)); hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height)); hipMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice); signal(SIGINT, sigint_callback); struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, d_image, d_results); hipDeviceSynchronize(); hipMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); hipFree(&d_image); hipFree(&d_results); glutInit(&argc, argv); glutInitWindowSize(width * 2,height); glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE); glutCreateWindow("6CS005 Image Progessing Courework"); glutDisplayFunc(display); glutKeyboardFunc(key_pressed); glClearColor(0.0, 1.0, 0.0, 1.0); glutMainLoop(); tidy_and_exit(); return 0; }
65ad8685f86550b3c6b9ec41c5e6fb8c412a40eb.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <GL/glut.h> #include <GL/gl.h> #include <malloc.h> #include <signal.h> #include <cuda_runtime_api.h> /****************************************************************************** The variable names and the function names of this program is same as provided by the university. The added variable and function are the only changes made to this program. Compile with: nvcc -o cudaip cudaip.cu -lglut -lGL ./cudaip ******************************************************************************/ #define width 100 #define height 72 unsigned char results[width * height]; unsigned char image[] = {255,255,255,255,0,0,255,255,255,255,255,255,255,0,255,255,0,0, 255,0,0,255,255,255,255,0,0,0,0,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 0,0,255,0,0,255,255,0,255,255,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,255, 255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255, 
255,255,0,0,255,255,255,0,0,255,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0, 0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,0,0,255,255,255,0,0,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,0,0,255,255,255,0,0,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,0,255,255,255,255,255, 0,255,255,255,255,0,0,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,0,0,255,255,0,0,0,0,0,0, 0,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,0, 0,0,0,255,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 0,0,255,255,255,0,0,255,255,255,255,0,0,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255,0,0,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255, 255,255,255,255,255,255,255,0,0,255,0,0,255,255,255,255,0,0,255, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,255,255,255,0,0,255,255,255, 255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255,255, 255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255, 255,0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255, 255,255,0,0,0,255,0,0,255,255,255,255,255,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255, 255,0,0,255,255,255,255,255,0,0,255,0,0,255,255,255,255,255,0, 
0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,0,0, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255, 0,0,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,0,0, 255,255,255,255,255,255,0,255,255,0,0,255,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,255,0,0,255,255,255,255,0, 0,255,255,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,0,0,255,255,255,255,255,0,0,255,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,0, 0,255,255,255,0,0,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,255,255,0,0,255,255,255,255,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,255, 
255,255,255,255,0,0,255,255,0,0,0,255,255,255,255,0,0,255,255, 255,255,255,255,255,255,0,255,255,255,0,0,255,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255, 255,0,0,255,255,255,255,255,255,255,255,0,0,0,255,0,0,255,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,0,0, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,0,0,0,255, 255,255,255,0,0,255,255,0,0,255,0,0,0,0,255,255,255,0,0, 255,255,255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,0, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,255,255,255,255,255,0,0,255,255,0,0,255,0,0,0,0, 255,255,0,0,255,255,255,255,0,0,255,255,0,0,0,0,0,255,255, 255,255,255,255,0,0,255,255,255,0,0,0,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,0,0, 0,0,0,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,0, 0,0,255,255,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,0, 0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,0, 0,0,0,0,0,0,0,255,255,0,0,255,255,255,255,255,255,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,255,255,255,0,0,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255, 255,255,255,255,0,0,0,0,0,255,255,0,0,255,255,0,0,255,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,255,255,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,0,0,255,255,255, 255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255, 255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 0,0,255,255,255,0,0,0,255,255,255,255,255,255,255,0,0,255,255, 255,255,255,0,0,255,255,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255,255, 0,0,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255, 255,0,255,255,255,0,0,0,0,255,0,0,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 255,255,0,0,255,255,0,0,255,255,0,0,255,0,255,255,0,0,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255, 255,255,255,0,0,255,255,255,0,0,255,255,0,255,255,0,255,255,0, 0,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,0,0, 0,0,255,255,255,0,255,255,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255, 0,0,255,0,0,0,0,255,255,255,0,0,255,255,255,0,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,255,255,0,0,255,255,255,0,0,0,255,0, 0,255,255,255,255,0,0,255,255,0,0,0,255,255,255,255,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,0,0,255,255,255,0,0,255,255,255, 0,0,0,0,0,0,255,255,255,255,255,0,0,255,0,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,255,255, 0,0,255,255,255,255,0,0,0,0,0,0,255,255,255,255,0,0,255, 255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,255, 255,0,0,0,0,0,0,0,0,255,255,0,0,255,255,0,0,0,0, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,255,255,0,0,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,255,255,0,0,0,0,255,255,0,0,0,255,255,0,0, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,255,255,255,0,0,0,255,255,0,0,0,255,255,0,0,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,0,0,0,0,0,255,0,0,255,255,255,0,0,255,255,255,255,0, 0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,0,0,255,255,255,0,0,0,0,255,255,0,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,255,255,0,0,0,255,255,0,0,255,255,255,0,0, 255,255,255,255,0,0,255,255,255,0,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,255,255,0,0,255,255,0,0,255,255,0,0,0,0, 255,255,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,0,0, 255,255,255,255,0,0,255,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,255,255,0,0,255,0,0,255, 255,0,0,255,0,0,255,255,0,0,255,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255, 0,0,255,0,0,255,255,255,255,0,0,0,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,255,0, 0,255,255,0,0,255,0,0,255,255,0,255,255,0,0,255,255,255,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255, 255,255,0,0,255,255,0,0,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255, 255,0,0,255,255,0,0,255,255,0,0,0,0,255,255,0,0,255,255, 0,0,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,255,255,255,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255, 0,0,255,255,255,255,0,0,255,255,0,0,255,255,0,0,0,0,255, 255,255,0,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,255,255,255,0,0,0,255,255,255,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,255,255,255,255,255,0,0,255,0,0,0,0,0,255,255,0,0,255, 0,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,0,255,0,0, 255,255,0,0,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0, 0,255,255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,0, 255,255,255,255,0,0,255,255,0,0,255,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,255,0,0,255,255,255,255,0,0,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,0,0,255,255,255,0,0,0,0,255,255,255,255,255,255,255, 255,255,0,0,255,255,255,255,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,255,255,255,0,0,255,255,0,0,0,0,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,0,0,0,255,255,255,255,255,255,0,0,255,255,0, 0,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255, 255,0,0,255,0,0,255,255,0,255,255,255,255,255,255,255,255,0,0, 0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,255,255,255,255,0,0,255, 255,255,255,255,255,255,0,0,0,0,0,255,255,0,0,255,255,255,255, 255,255,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,0,0,0,0,255, 255,255,255,0,0,0,0,0,0,0,255,255,0,0,0,0,255,255,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255, 0,0,0,0,0,255,255,255,0,0,0,0,0,255,255,255,255,0,0, 0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0, 0,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,255,255, 255,255,255,255,255,0,0,255,255,0,0,255,255,255,0,0,255,255,255, 255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255, 0,0,255,255,255,255,255,255,255,255,255,0,255,255,255,0,0,255,255, 0,0,255,255,255,255,0,0,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,0,0,255, 255,255,0,0,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255, 0,255,255,255,255,255,255,255,255,255,255,0,0,255,0,0,0,0,255, 255,255,0,0,255,255,255,0,0,0,255,0,0,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0, 0,0,255,255,255,0,0,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,255,255,255,255,255,0,0,255,255,255,255,0,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,255,255,0,0,0,255,255,255,255,0,255,255,255,255,255,255,255, 255,255,255,0,0,255,255,255,255,255,255,255,0,0,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 255,255,0,0,0,255,255,255,255,255,0,255,255,255,255,255,0,0,255, 255,255,255,255,255,255,255,255,0,0,255,255,255,255,255,0,255,0,0, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,0,0,0,255,255,255,255,255,0,255,255,255, 255,255,255,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0, 0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,255,255,255,0,0,255,255,255,0,0,0,255,255,255,255, 255,0,255,255,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,255,255,255,0,255,255,255,255,0,255,255,255,255,0, 0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,255,255,255,255,0,255,255,255,255,0, 255,255,255,255,0,255,255,255,255,255,255,255,0,255,255,255,255,255,255, 0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,255,255,0,0,0,255,255,255,255,0,0, 255,255,255,255,0,0,255,255,255,0,255,255,255,255,255,255,255,0,255, 0,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,255, 255,255,255,0,0,0,255,255,255,0,0,255,255,255,255,255,255,255,255, 255,255,255,0,255,0,0,255,0,0,0,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,0,0,255,255, 255,255,0,0,255,255,255,255,0,0,0,255,255,255,255,0,255,255,255, 255,255,255,255,0,255,255,255,255,255,0,0,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255, 255,0,0,255,255,255,255,0,0,255,255,255,255,0,0,0,255,255,255, 255,0,0,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, 255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255, 0,0,255,255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,0, 0,0,255,255,255,255,0,0,255,255,255,255,255,255,0,0,255,255,255 }; __global__ void detect_edges(unsigned char *in, unsigned char *out) { int i = (blockIdx.x * 72) + threadIdx.x; int x, y; // the pixel of interest int b, d, f, h; // the pixels adjacent to x,y used for the calculation int r; // the result of calculate y = i / width; x = i - (width * y); if (x == 0 || y == 0 || x == width - 1 || y == height - 1) { out[i] = 0; } else { b = i + width; d = i - 1; f = i + 1; h = i - width; r = (in[i] * 4) + (in[b] * -1) + (in[d] * -1) + (in[f] * -1) + (in[h] * -1); if (r > 0) { // if the result is positive this is an edge pixel out[i] = 255; } else { out[i] = 0; } } } void tidy_and_exit() { exit(0); } void sigint_callback(int signal_number){ printf("\nInterrupt from keyboard\n"); tidy_and_exit(); } static void display() { glClear(GL_COLOR_BUFFER_BIT); glRasterPos4i(-1, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image); glRasterPos4i(0, -1, 0, 1); glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results); glFlush(); } static void key_pressed(unsigned char key, int x, int y) { switch(key){ case 27: tidy_and_exit(); break; default: printf("\nPress escape to exit\n"); break; } } int 
time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char **argv) { unsigned char *d_results; unsigned char *d_image; cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height)); cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height)); cudaMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice); signal(SIGINT, sigint_callback); struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); detect_edges<<<100,72>>>(d_image, d_results); cudaThreadSynchronize(); cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); cudaFree(&d_image); cudaFree(&d_results); glutInit(&argc, argv); glutInitWindowSize(width * 2,height); glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE); glutCreateWindow("6CS005 Image Progessing Courework"); glutDisplayFunc(display); glutKeyboardFunc(key_pressed); glClearColor(0.0, 1.0, 0.0, 1.0); glutMainLoop(); tidy_and_exit(); return 0; }
c7a139fe497f9fe3649c4aafb6a0eb11f8f8c7fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define N 1024000 __global__ void add(int *data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { data[i]++; } } int main() { int data[N]; int *dev_data; int i; // Allocate memory on the GPU. hipMalloc((void**)&dev_data, N * sizeof(int)); // Initialize data. for (i=0; i<N; i++) { data[i] = 0; } // Copy data to the GPU. hipMemcpy(dev_data, data, N * sizeof(int), hipMemcpyHostToDevice); for (i=0; i<100; i++) { hipLaunchKernelGGL(( add), dim3(32), dim3(1024), 0, 0, dev_data); } hipDeviceSynchronize(); // Copy data from the GPU. hipMemcpy(data, dev_data, N * sizeof(int), hipMemcpyDeviceToHost); // Free memory allocated on the GPU. hipFree(dev_data); for (i=0; i<N; i++) { printf("%d\n", data[i]); } return 0; }
c7a139fe497f9fe3649c4aafb6a0eb11f8f8c7fb.cu
#include <stdio.h> #define N 1024000 __global__ void add(int *data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { data[i]++; } } int main() { int data[N]; int *dev_data; int i; // Allocate memory on the GPU. cudaMalloc((void**)&dev_data, N * sizeof(int)); // Initialize data. for (i=0; i<N; i++) { data[i] = 0; } // Copy data to the GPU. cudaMemcpy(dev_data, data, N * sizeof(int), cudaMemcpyHostToDevice); for (i=0; i<100; i++) { add<<<32, 1024>>>(dev_data); } cudaDeviceSynchronize(); // Copy data from the GPU. cudaMemcpy(data, dev_data, N * sizeof(int), cudaMemcpyDeviceToHost); // Free memory allocated on the GPU. cudaFree(dev_data); for (i=0; i<N; i++) { printf("%d\n", data[i]); } return 0; }
bcbce4f1beec0562005e1fdf01965f7532bbf351.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ __global__ void kernScan(int n, const int pow, int *odata, const int *idata) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; odata[index] = (index >= pow) ? idata[index - pow] + idata[index] : idata[index]; } __global__ void kernInToEx(int n, int *odata, const int *idata) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; odata[index] = (index == 0) ? 0 : idata[index - 1]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // Create device arrays int *dev_odata, *dev_idata; int nsize = n * sizeof(int); hipMalloc((void**)&dev_odata, nsize); checkCUDAError("hipMalloc for dev_odata failed!"); hipMalloc((void**)&dev_idata, nsize); checkCUDAError("hipMalloc for dev_idata failed!"); // Copy device arrays to device hipMemcpy(dev_odata, odata, nsize, hipMemcpyHostToDevice); checkCUDAError("hipMemcpy for dev_odata failed!"); hipMemcpy(dev_idata, idata, nsize, hipMemcpyHostToDevice); checkCUDAError("hipMemcpy for dev_idata failed!"); // Compute block per grid and thread per block dim3 numBlocks((n + blockSize - 1) / blockSize); dim3 numThreads(blockSize); timer().startGpuTimer(); // Naive Scan - Creates inclusive scan output int levels = ilog2ceil(n); for (int d = 1; d <= levels; d++) { int pow = 1 << (d - 1); hipLaunchKernelGGL(( kernScan) , dim3(numBlocks), dim3(numThreads) , 0, 0, n, pow, dev_odata, dev_idata); checkCUDAError("kernScan failed for level " + levels); std::swap(dev_odata, dev_idata); } // Convert inclusive scan to exclusive hipLaunchKernelGGL(( kernInToEx) , dim3(numBlocks), 
dim3(numThreads) , 0, 0, n, dev_odata, dev_idata); checkCUDAError("kernInToEx failed!"); timer().endGpuTimer(); // Copy device arrays back to host hipMemcpy(odata, dev_odata, nsize, hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy (device to host) for dev_odata failed!"); // Free memory hipFree(dev_odata); hipFree(dev_idata); checkCUDAError("hipFree failed!"); } } }
bcbce4f1beec0562005e1fdf01965f7532bbf351.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // TODO: __global__ __global__ void kernScan(int n, const int pow, int *odata, const int *idata) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; odata[index] = (index >= pow) ? idata[index - pow] + idata[index] : idata[index]; } __global__ void kernInToEx(int n, int *odata, const int *idata) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) return; odata[index] = (index == 0) ? 0 : idata[index - 1]; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // Create device arrays int *dev_odata, *dev_idata; int nsize = n * sizeof(int); cudaMalloc((void**)&dev_odata, nsize); checkCUDAError("cudaMalloc for dev_odata failed!"); cudaMalloc((void**)&dev_idata, nsize); checkCUDAError("cudaMalloc for dev_idata failed!"); // Copy device arrays to device cudaMemcpy(dev_odata, odata, nsize, cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy for dev_odata failed!"); cudaMemcpy(dev_idata, idata, nsize, cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy for dev_idata failed!"); // Compute block per grid and thread per block dim3 numBlocks((n + blockSize - 1) / blockSize); dim3 numThreads(blockSize); timer().startGpuTimer(); // Naive Scan - Creates inclusive scan output int levels = ilog2ceil(n); for (int d = 1; d <= levels; d++) { int pow = 1 << (d - 1); kernScan <<<numBlocks, numThreads >>> (n, pow, dev_odata, dev_idata); checkCUDAError("kernScan failed for level " + levels); std::swap(dev_odata, dev_idata); } // Convert inclusive scan to exclusive kernInToEx <<<numBlocks, numThreads >>> (n, dev_odata, dev_idata); checkCUDAError("kernInToEx failed!"); timer().endGpuTimer(); // Copy device arrays back to 
host cudaMemcpy(odata, dev_odata, nsize, cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy (device to host) for dev_odata failed!"); // Free memory cudaFree(dev_odata); cudaFree(dev_idata); checkCUDAError("cudaFree failed!"); } } }
0cc0d04f5db0d74dc6b943f551ea1cd9e81cb485.hip
// !!! This is a file automatically generated by hipify!!! /** * gramschmidt.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <hip/hip_runtime.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #define SMALL_FLOAT_VAL 0.00000001f double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } float absVal(float a) { if (a < 0) { return (a * -1); } else { return a; } } float percentDiff(double val1, double val2) { if ((absVal(val1) < 0.01) && (absVal(val2) < 0.01)) { return 0.0f; } else { return 100.0f * (absVal(absVal(val1 - val2) / absVal(val1 + SMALL_FLOAT_VAL))); } } // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define M 2048 #define N 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gramschmidt(DATA_TYPE *A, DATA_TYPE *R, DATA_TYPE *Q) { int i, j, k; DATA_TYPE nrm; for (k = 0; k < N; k++) { nrm = 0; for (i = 0; i < M; i++) { nrm += A[i * N + k] * A[i * N + k]; } R[k * N + k] = sqrt(nrm); for (i = 0; i < M; i++) { Q[i * N + k] = A[i * N + k] / R[k * N + k]; } for (j = k + 1; j < N; j++) { R[k * N + j] = 0; for (i = 0; i < M; i++) { R[k * N + j] += Q[i * N + k] * A[i * N + j]; } for (i = 0; i < M; i++) { A[i * N + j] = A[i * N + j] - Q[i * N + k] * R[k * N + j]; } } } } void init_array(DATA_TYPE *A) { int i, j; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE)(i + 1) * (j + 1)) 
/ (M + 1); } } } void compareResults(DATA_TYPE *A, DATA_TYPE *A_outputFromGpu) { int i, j, fail; fail = 0; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { if (percentDiff(A[i * N + j], A_outputFromGpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; printf("i: %d j: %d \n1: %f\n 2: %f\n", i, j, A[i * N + j], A_outputFromGpu[i * N + j]); } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name); hipSetDevice(GPU_DEVICE); return; } __global__ void gramschmidt_kernel1(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("1:256"))) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { DATA_TYPE nrm = 0.0; int i; for (i = 0; i < M; i++) { nrm += a[i * N + k] * a[i * N + k]; } r[k * N + k] = sqrt(nrm); } } __global__ void gramschmidt_kernel2(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("8:256"))) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < M) { q[i * N + k] = a[i * N + k] / r[k * N + k]; } } __global__ void gramschmidt_kernel3(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("8:256"))) { int j = blockIdx.x * blockDim.x + threadIdx.x; if ((j > k) && (j < N)) { r[k * N + j] = 0.0; int i; for (i = 0; i < M; i++) { r[k * N + j] += q[i * N + k] * a[i * N + j]; } for 
(i = 0; i < M; i++) { a[i * N + j] -= q[i * N + k] * r[k * N + j]; } } } void gramschmidtCuda(DATA_TYPE *A, DATA_TYPE *R, DATA_TYPE *Q, DATA_TYPE *A_outputFromGpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 gridKernel1(1, 1); dim3 gridKernel2((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1); dim3 gridKernel3((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1); DATA_TYPE *A_gpu; DATA_TYPE *R_gpu; DATA_TYPE *Q_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * M * N); hipMalloc((void **)&R_gpu, sizeof(DATA_TYPE) * M * N); hipMalloc((void **)&Q_gpu, sizeof(DATA_TYPE) * M * N); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * M * N, hipMemcpyHostToDevice); t_start = rtclock(); int k; for (k = 0; k < N; k++) { hipLaunchKernelGGL(( gramschmidt_kernel1), dim3(gridKernel1), dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k); hipDeviceSynchronize(); hipLaunchKernelGGL(( gramschmidt_kernel2), dim3(gridKernel2), dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k); hipDeviceSynchronize(); hipLaunchKernelGGL(( gramschmidt_kernel3), dim3(gridKernel3), dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k); hipDeviceSynchronize(); break; } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); hipMemcpy(A_outputFromGpu, A_gpu, sizeof(DATA_TYPE) * M * N, hipMemcpyDeviceToHost); hipFree(A_gpu); hipFree(R_gpu); hipFree(Q_gpu); } int main(int argc, char *argv[]) { // double t_start, t_end; DATA_TYPE *A; DATA_TYPE *A_outputFromGpu; DATA_TYPE *R; DATA_TYPE *Q; A = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); A_outputFromGpu = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); R = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); Q = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); init_array(A); GPU_argv_init(); gramschmidtCuda(A, R, Q, A_outputFromGpu); // t_start = rtclock(); // gramschmidt(A, R, Q); // t_end = rtclock(); // fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); // compareResults(A, A_outputFromGpu); free(A); 
free(A_outputFromGpu); free(R); free(Q); return 0; }
0cc0d04f5db0d74dc6b943f551ea1cd9e81cb485.cu
/** * gramschmidt.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <cuda.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #define SMALL_FLOAT_VAL 0.00000001f double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } float absVal(float a) { if (a < 0) { return (a * -1); } else { return a; } } float percentDiff(double val1, double val2) { if ((absVal(val1) < 0.01) && (absVal(val2) < 0.01)) { return 0.0f; } else { return 100.0f * (absVal(absVal(val1 - val2) / absVal(val1 + SMALL_FLOAT_VAL))); } } // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define M 2048 #define N 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gramschmidt(DATA_TYPE *A, DATA_TYPE *R, DATA_TYPE *Q) { int i, j, k; DATA_TYPE nrm; for (k = 0; k < N; k++) { nrm = 0; for (i = 0; i < M; i++) { nrm += A[i * N + k] * A[i * N + k]; } R[k * N + k] = sqrt(nrm); for (i = 0; i < M; i++) { Q[i * N + k] = A[i * N + k] / R[k * N + k]; } for (j = k + 1; j < N; j++) { R[k * N + j] = 0; for (i = 0; i < M; i++) { R[k * N + j] += Q[i * N + k] * A[i * N + j]; } for (i = 0; i < M; i++) { A[i * N + j] = A[i * N + j] - Q[i * N + k] * R[k * N + j]; } } } } void init_array(DATA_TYPE *A) { int i, j; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE)(i + 1) * (j + 1)) / (M + 1); } } } void compareResults(DATA_TYPE *A, DATA_TYPE 
*A_outputFromGpu) { int i, j, fail; fail = 0; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { if (percentDiff(A[i * N + j], A_outputFromGpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; printf("i: %d j: %d \n1: %f\n 2: %f\n", i, j, A[i * N + j], A_outputFromGpu[i * N + j]); } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name); cudaSetDevice(GPU_DEVICE); return; } __global__ void gramschmidt_kernel1(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("1:256"))) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid == 0) { DATA_TYPE nrm = 0.0; int i; for (i = 0; i < M; i++) { nrm += a[i * N + k] * a[i * N + k]; } r[k * N + k] = sqrt(nrm); } } __global__ void gramschmidt_kernel2(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("8:256"))) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < M) { q[i * N + k] = a[i * N + k] / r[k * N + k]; } } __global__ void gramschmidt_kernel3(DATA_TYPE __attribute__((annotate("2048:2048"))) * a, DATA_TYPE __attribute__((annotate("2048:2048"))) * r, DATA_TYPE __attribute__((annotate("2048:2048"))) * q, int __attribute__((annotate("0"))) k) __attribute__((annotate("8:256"))) { int j = blockIdx.x * blockDim.x + threadIdx.x; if ((j > k) && (j < N)) { r[k * N + j] = 0.0; int i; for (i = 0; i < M; i++) { r[k * N + j] += q[i * N + k] * a[i * N + j]; } for (i = 0; i < M; i++) { a[i * N + j] -= q[i * N + k] * r[k * N 
+ j]; } } } void gramschmidtCuda(DATA_TYPE *A, DATA_TYPE *R, DATA_TYPE *Q, DATA_TYPE *A_outputFromGpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 gridKernel1(1, 1); dim3 gridKernel2((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1); dim3 gridKernel3((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1); DATA_TYPE *A_gpu; DATA_TYPE *R_gpu; DATA_TYPE *Q_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * M * N); cudaMalloc((void **)&R_gpu, sizeof(DATA_TYPE) * M * N); cudaMalloc((void **)&Q_gpu, sizeof(DATA_TYPE) * M * N); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * M * N, cudaMemcpyHostToDevice); t_start = rtclock(); int k; for (k = 0; k < N; k++) { gramschmidt_kernel1<<<gridKernel1, block>>>(A_gpu, R_gpu, Q_gpu, k); cudaDeviceSynchronize(); gramschmidt_kernel2<<<gridKernel2, block>>>(A_gpu, R_gpu, Q_gpu, k); cudaDeviceSynchronize(); gramschmidt_kernel3<<<gridKernel3, block>>>(A_gpu, R_gpu, Q_gpu, k); cudaDeviceSynchronize(); break; } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); cudaMemcpy(A_outputFromGpu, A_gpu, sizeof(DATA_TYPE) * M * N, cudaMemcpyDeviceToHost); cudaFree(A_gpu); cudaFree(R_gpu); cudaFree(Q_gpu); } int main(int argc, char *argv[]) { // double t_start, t_end; DATA_TYPE *A; DATA_TYPE *A_outputFromGpu; DATA_TYPE *R; DATA_TYPE *Q; A = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); A_outputFromGpu = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); R = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); Q = (DATA_TYPE *)malloc(M * N * sizeof(DATA_TYPE)); init_array(A); GPU_argv_init(); gramschmidtCuda(A, R, Q, A_outputFromGpu); // t_start = rtclock(); // gramschmidt(A, R, Q); // t_end = rtclock(); // fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); // compareResults(A, A_outputFromGpu); free(A); free(A_outputFromGpu); free(R); free(Q); return 0; }
b6c400856cbdd9ec9b07e02821a9ad06bdf2534b.hip
// !!! This is a file automatically generated by hipify!!! /** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Will Killian <killian@udel.edu> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #define POLYBENCH_TIME 1 #include "gesummv.cuh" #include "../../common/polybench.h" #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f #define RUN_ON_CPU void gesummv(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(tmp,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n)) { int i, j; for (i = 0; i < _PB_N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < _PB_N; j++) { tmp[i] = A[i][j] * x[j] + tmp[i]; y[i] = B[i][j] * x[j] + y[i]; } y[i] = alpha * tmp[i] + beta * y[i]; } } void init(int n, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(x,N,n)) { int i, j; *alpha = 43532; *beta = 12313; for (i = 0; i < n; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < n; j++) { A[i][j] = ((DATA_TYPE) i*j) / N; B[i][j] = ((DATA_TYPE) i*j) / n; } } } void compareResults(int n, DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,N,n)) { int i, fail; fail = 0; for (i=0; i<n; i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error 
Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* tmp, DATA_TYPE* x, DATA_TYPE* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < _PB_N) { int j; for(j = 0; j < _PB_N; j++) { tmp[i] += A[i * N + j] * x[j]; y[i] += B[i * N + j] * x[j]; } y[i] = alpha * tmp[i] + beta * y[i]; } } void gesummvCuda(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(tmp,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,N,n)) { DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); /* Start timer. */ polybench_start_instruments; hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0, 0, n, alpha, beta, A_gpu, B_gpu, tmp_gpu, x_gpu, y_gpu); hipDeviceSynchronize(); /* Stop and print timer. 
*/ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_1D(y,N,n)) { int i; for (i = 0; i < n; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } } int main(int argc, char *argv[]) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,N,n,n); POLYBENCH_1D_ARRAY_DECL(tmp,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(x,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(y,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(y_outputFromGpu,DATA_TYPE,N,n); init(n, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(x)); GPU_argv_init(); gesummvCuda(n, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(tmp), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; gesummv(n, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(tmp), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(y_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(tmp); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(y_outputFromGpu); return 0; } #include "../../common/polybench.c"
b6c400856cbdd9ec9b07e02821a9ad06bdf2534b.cu
/** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <sgrauerg@gmail.com> * Will Killian <killian@udel.edu> * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #define POLYBENCH_TIME 1 #include "gesummv.cuh" #include "../../common/polybench.h" #include "../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f #define RUN_ON_CPU void gesummv(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(tmp,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n)) { int i, j; for (i = 0; i < _PB_N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < _PB_N; j++) { tmp[i] = A[i][j] * x[j] + tmp[i]; y[i] = B[i][j] * x[j] + y[i]; } y[i] = alpha * tmp[i] + beta * y[i]; } } void init(int n, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(x,N,n)) { int i, j; *alpha = 43532; *beta = 12313; for (i = 0; i < n; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < n; j++) { A[i][j] = ((DATA_TYPE) i*j) / N; B[i][j] = ((DATA_TYPE) i*j) / n; } } } void compareResults(int n, DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,N,n)) { int i, fail; fail = 0; for (i=0; i<n; i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void 
GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* tmp, DATA_TYPE* x, DATA_TYPE* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < _PB_N) { int j; for(j = 0; j < _PB_N; j++) { tmp[i] += A[i * N + j] * x[j]; y[i] += B[i * N + j] * x[j]; } y[i] = alpha * tmp[i] + beta * y[i]; } } void gesummvCuda(int n, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_1D(tmp,N,n), DATA_TYPE POLYBENCH_1D(x,N,n), DATA_TYPE POLYBENCH_1D(y,N,n), DATA_TYPE POLYBENCH_1D(y_outputFromGpu,N,n)) { DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); /* Start timer. */ polybench_start_instruments; gesummv_kernel<<< grid, block>>>(n, alpha, beta, A_gpu, B_gpu, tmp_gpu, x_gpu, y_gpu); cudaThreadSynchronize(); /* Stop and print timer. 
*/ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n, DATA_TYPE POLYBENCH_1D(y,N,n)) { int i; for (i = 0; i < n; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } } int main(int argc, char *argv[]) { /* Retrieve problem size. */ int n = N; /* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,N,n,n); POLYBENCH_1D_ARRAY_DECL(tmp,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(x,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(y,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(y_outputFromGpu,DATA_TYPE,N,n); init(n, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(x)); GPU_argv_init(); gesummvCuda(n, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(tmp), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; gesummv(n, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(tmp), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(y_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(y_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(tmp); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(y_outputFromGpu); return 0; } #include "../../common/polybench.c"
b6338fac458619af025ea3133fc1bc663e9401cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> /* Slide 10: Each SM can only take up to 768 threads. Ex: 256 threads/block x 3 block, or 128 x 6 blocks etc. */ #define TILE_WIDTH 2 //CHANGE THIS TO ARBITRARY __global__ void matrixMult (float *a, float *b, float *c, int width); __global__ void blankCall() {int i = 0; if (i == 0) {} }; //Dummy calls does nothing. Setting int i so it actually does something (simple). Rest because don't like warnings when compiling! int main (int argc, char *argv[]) { /* Notes: floating values are set to output with 2 decimals only. Without limitation, product.dat ended up being humongous in size! */ if (argc != 2) { printf( "\nError: Number of arguments incorrect.\n" "There can only be 1 additional argument, which is the row/column length.\n" "Ex: ./MatrixMult 512\n" "Program gracefully terminated.\n"); exit(0); } FILE *f = fopen("product.dat", "w+"); if (f == NULL) { printf("File could not be created/opened!\n" "Program gracefully terminated.\n"); exit(1); } int i, j; int dim = atoi(argv[1]); if (dim < 0) { printf("Input must be a positive number!\n" "Program gracefully terminated.\n"); exit(0); } //CPU pointers to 2d array in heap float *matrix1 = (float *)malloc(dim * dim * sizeof(float)); float *matrix2 = (float *)malloc(dim * dim * sizeof(float)); float *result = (float *)malloc(dim * dim * sizeof(float)); //Populating the two matrices (and printing matrix 1) //srand(time(NULL)); srand48(time(NULL)); for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //*(matrix1 + i*dim + j) = ((float)rand()/(float)(RAND_MAX)) * 4; //Mod for smaller, easier to hand check //*(matrix2 + i*dim + j) = ((float)rand()/(float)(RAND_MAX)) * 4; *(matrix1 + i*dim + j) = drand48() * 2; *(matrix2 + i*dim + j) = drand48() * 2; /* 2) Yes. 
For the random numbers, I suggest that you use either srand48()/drand48(), which is a random number generator for floats and floats or cuRand() but the latter is only available for GPUs. Both are significantly faster than what youre doing. */ //printf("%.2f ", *(matrix1 + i*dim + j)); //Print Matrix1 //fprintf(f, "%.2f\t", *(matrix1 + i*dim + j)); } //printf("\n"); //fprintf(f, "\n"); } //Print matrix2 /* //printf("\n"); fprintf(f, "\n"); for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //printf("%.2f ", *(matrix2 + i*dim + j)); //fprintf(f, "%.2f\t", *(matrix2 + i*dim + j)); } //printf("\n"); //fprintf(f, "\n"); } //printf("\n"); //fprintf(f, "\n"); */ //GPU pointers for 2d array float *dev_a, *dev_b, *dev_c; int size = dim * dim * sizeof(float); hipMalloc((void **) &dev_a, size); hipMalloc((void **) &dev_b, size); hipMalloc((void **) &dev_c, size); hipMemcpy(dev_a, matrix1, size, hipMemcpyHostToDevice); //Ptr or dereference? hipMemcpy(dev_b, matrix2, size, hipMemcpyHostToDevice); /* If tile = 151, then 18 blocks. */ //Preparing GPU call dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); //Threads in block? //Blocks in grid //dim/threads in dim, returns number of blocks dim3 dimGrid((int)ceil(dim/dimBlock.x) + 1, (int)ceil(dim/dimBlock.y) + 1); //pick a block size so that threads < 768 //Dummy call hipLaunchKernelGGL(( blankCall), dim3(1), dim3(1), 0, 0, ); //Setting up timer start clock_t start, end; double cpu_time; start = clock(); //Calling GPU hipLaunchKernelGGL(( matrixMult), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b, dev_c, dim); //Ex: 256 threads per block. 
Anything less than 27 * 27 //Synchronize before end hipDeviceSynchronize(); //Stopping the timer end = clock(); cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC; //Retrieving computed data back hipMemcpy(result, dev_c, size, hipMemcpyDeviceToHost); //Saving the world hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); //Printing Result 3 for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //printf("%.2f ", *(result + i*dim + j)); fprintf(f, "%.2f\t", *(result + i*dim + j)); } //printf("\n"); fprintf(f, "\n"); } fclose(f); printf("Time: %lf\n", cpu_time); } __global__ void matrixMult(float* A, float* B, float* C, int width) { int k; float sum = 0; int col = blockIdx.x*TILE_WIDTH + threadIdx.x; int row = blockIdx.y*TILE_WIDTH + threadIdx.y; if(col < width && row < width) { for (k = 0; k < width; k++) sum += A[row * width + k] * B[k * width + col]; C[row * width + col] = sum; } }
b6338fac458619af025ea3133fc1bc663e9401cb.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> /* Slide 10: Each SM can only take up to 768 threads. Ex: 256 threads/block x 3 block, or 128 x 6 blocks etc. */ #define TILE_WIDTH 2 //CHANGE THIS TO ARBITRARY __global__ void matrixMult (float *a, float *b, float *c, int width); __global__ void blankCall() {int i = 0; if (i == 0) {} }; //Dummy calls does nothing. Setting int i so it actually does something (simple). Rest because don't like warnings when compiling! int main (int argc, char *argv[]) { /* Notes: floating values are set to output with 2 decimals only. Without limitation, product.dat ended up being humongous in size! */ if (argc != 2) { printf( "\nError: Number of arguments incorrect.\n" "There can only be 1 additional argument, which is the row/column length.\n" "Ex: ./MatrixMult 512\n" "Program gracefully terminated.\n"); exit(0); } FILE *f = fopen("product.dat", "w+"); if (f == NULL) { printf("File could not be created/opened!\n" "Program gracefully terminated.\n"); exit(1); } int i, j; int dim = atoi(argv[1]); if (dim < 0) { printf("Input must be a positive number!\n" "Program gracefully terminated.\n"); exit(0); } //CPU pointers to 2d array in heap float *matrix1 = (float *)malloc(dim * dim * sizeof(float)); float *matrix2 = (float *)malloc(dim * dim * sizeof(float)); float *result = (float *)malloc(dim * dim * sizeof(float)); //Populating the two matrices (and printing matrix 1) //srand(time(NULL)); srand48(time(NULL)); for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //*(matrix1 + i*dim + j) = ((float)rand()/(float)(RAND_MAX)) * 4; //Mod for smaller, easier to hand check //*(matrix2 + i*dim + j) = ((float)rand()/(float)(RAND_MAX)) * 4; *(matrix1 + i*dim + j) = drand48() * 2; *(matrix2 + i*dim + j) = drand48() * 2; /* 2) Yes. For the random numbers, I suggest that you use either srand48()/drand48(), which is a random number generator for floats and floats or cuRand() but the latter is only available for GPUs. 
Both are significantly faster than what you’re doing. */ //printf("%.2f ", *(matrix1 + i*dim + j)); //Print Matrix1 //fprintf(f, "%.2f\t", *(matrix1 + i*dim + j)); } //printf("\n"); //fprintf(f, "\n"); } //Print matrix2 /* //printf("\n"); fprintf(f, "\n"); for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //printf("%.2f ", *(matrix2 + i*dim + j)); //fprintf(f, "%.2f\t", *(matrix2 + i*dim + j)); } //printf("\n"); //fprintf(f, "\n"); } //printf("\n"); //fprintf(f, "\n"); */ //GPU pointers for 2d array float *dev_a, *dev_b, *dev_c; int size = dim * dim * sizeof(float); cudaMalloc((void **) &dev_a, size); cudaMalloc((void **) &dev_b, size); cudaMalloc((void **) &dev_c, size); cudaMemcpy(dev_a, matrix1, size, cudaMemcpyHostToDevice); //Ptr or dereference? cudaMemcpy(dev_b, matrix2, size, cudaMemcpyHostToDevice); /* If tile = 151, then 18 blocks. */ //Preparing GPU call dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); //Threads in block? //Blocks in grid //dim/threads in dim, returns number of blocks dim3 dimGrid((int)ceil(dim/dimBlock.x) + 1, (int)ceil(dim/dimBlock.y) + 1); //pick a block size so that threads < 768 //Dummy call blankCall<<<1, 1>>>(); //Setting up timer start clock_t start, end; double cpu_time; start = clock(); //Calling GPU matrixMult<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, dim); //Ex: 256 threads per block. 
Anything less than 27 * 27 //Synchronize before end cudaThreadSynchronize(); //Stopping the timer end = clock(); cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC; //Retrieving computed data back cudaMemcpy(result, dev_c, size, cudaMemcpyDeviceToHost); //Saving the world cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); //Printing Result 3 for (i = 0; i < dim; i++) { for (j = 0; j < dim; j++) { //printf("%.2f ", *(result + i*dim + j)); fprintf(f, "%.2f\t", *(result + i*dim + j)); } //printf("\n"); fprintf(f, "\n"); } fclose(f); printf("Time: %lf\n", cpu_time); } __global__ void matrixMult(float* A, float* B, float* C, int width) { int k; float sum = 0; int col = blockIdx.x*TILE_WIDTH + threadIdx.x; int row = blockIdx.y*TILE_WIDTH + threadIdx.y; if(col < width && row < width) { for (k = 0; k < width; k++) sum += A[row * width + k] * B[k * width + col]; C[row * width + col] = sum; } }