serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
6,201 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
float secuential(const int array[] , int dim){
    // CPU reference: sample standard deviation (divides by dim-1) of an int array.
    float total = 0;
    for (int i = 0; i < dim; ++i)
        total += array[i];
    const float mean = total / dim;
    float sq_dev = 0;
    for (int i = 0; i < dim; ++i) {
        const float d = array[i] - mean;
        sq_dev += d * d;
    }
    return sqrt(sq_dev / (dim - 1));
}
__global__ void func( const int array[] , int dim,float result[], const int thread_number)
{
    // Two-pass standard deviation: pass 1 sums the array into result[0], pass 2
    // sums squared deviations, then result[0] = sqrt(sum/(dim-1)).
    // NOTE: __syncthreads() is a BLOCK barrier only, so this kernel is correct
    // only when launched with a single block (as the host code does for small N).
    // result[0] must be 0 on entry.
    int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z
              + threadIdx.z* blockDim.y* blockDim.x
              + threadIdx.y* blockDim.x + threadIdx.x;
    // Pass 1: accumulate the sum of all elements into result[0].
    if (index < dim) {
        if (dim <= thread_number) {
            // more threads than elements: one element per thread
            atomicAdd(result, (float)array[index]);
        } else {
            // fewer threads than elements: each thread takes a contiguous chunk;
            // the last thread also takes the remainder
            int chunk = dim / thread_number;
            int start = index * chunk;
            int end = (index == thread_number - 1) ? dim : start + chunk;
            for (int i = start; i < end; i++)
                atomicAdd(result, (float)array[i]);
        }
    }
    __syncthreads();
    float mean = result[0] / dim;
    // BUG FIX: the old code reset result[0] immediately after reading it, racing
    // with threads that had not read the sum yet, and let every thread write the
    // reset. Barrier first, then a single writer, then barrier again.
    __syncthreads();
    if (index == 0) result[0] = 0;
    __syncthreads();
    // Pass 2: accumulate squared deviations from the mean.
    if (index < dim) {
        if (dim <= thread_number) {
            float d = array[index] - mean;
            atomicAdd(result, d * d);
        } else {
            int chunk = dim / thread_number;
            int start = index * chunk;
            int end = (index == thread_number - 1) ? dim : start + chunk;
            for (int i = start; i < end; i++) {
                float d = array[i] - mean;
                atomicAdd(result, d * d);
            }
        }
    }
    __syncthreads();
    // BUG FIX: previously every thread applied sqrt to result[0] (read/modify race,
    // sqrt applied a nondeterministic number of times). Single writer now.
    if (index == 0)
        result[0] = sqrtf(result[0] / (dim - 1));
}
int main(int argc, char *argv[]){
    // Computes the sample std-dev of an int array on GPU and CPU and prints both
    // results with their timings. Usage: prog [size_array] [N] [v | fill_value]
    clock_t time_begin;
    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    int size_array = 9;
    float *d_gpu_res = NULL;
    float *h_gpu_res = 0;
    bool verbose = false;
    int N = 1;
    if (argc == 3) {
        size_array = atoi(argv[1]);
        N = atoi(argv[2]);
    }
    else if (argc == 4) {
        size_array = atoi(argv[1]);
        N = atoi(argv[2]);
        verbose = (argv[3][0] == 'v');
    }
    h_gpu_res = (float*)malloc(sizeof(float));
    h_gpu_res[0] = 0;
    // malloc a host array, filled with random digits (or a fixed value in arg mode)
    host_array = (int*)malloc(size_array * sizeof(int));
    for (int i = 0; i < size_array; i++) {
        host_array[i] = rand() % 10;
        if (argc == 4 && verbose) {
            printf("%i\t", host_array[i]);
        }
        else if (argc == 4)
            host_array[i] = atoi(argv[3]);
    }
    printf("\n");
    // cudaMalloc device buffers
    cudaMalloc(&device_array, size_array * sizeof(int));
    cudaError_t er = cudaMalloc(&d_gpu_res, sizeof(float));
    cudaError_t e = cudaMemcpy(device_array, host_array, sizeof(int) * size_array, cudaMemcpyHostToDevice);
    // BUG FIX: the result buffer holds a float; the old code copied sizeof(int) bytes.
    cudaError_t error = cudaMemcpy(d_gpu_res, h_gpu_res, sizeof(float), cudaMemcpyHostToDevice);
    if (er != cudaSuccess || e != cudaSuccess || error != cudaSuccess)
        printf("CUDA setup error\n");
    dim3 bloque(N, N); // two-dimensional block of N*N threads
    dim3 grid(1, 1);
    int thread_number = N * N;
    if (N * N > 512) {
        // BUG FIX: the old code set a 512x512 block (262144 threads per block),
        // far beyond the hardware limit. Cap the block at 512 threads and grow
        // the grid instead. (Note: the kernel itself is only exact for 1 block.)
        bloque.x = 512;
        bloque.y = 1;
        grid.x = (unsigned int)ceil(double(N * N) / double(bloque.x));
        grid.y = 1;
    }
    time_begin = clock();
    func<<<grid, bloque>>>(device_array, size_array, d_gpu_res, thread_number);
    cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
    // download and inspect the result on the host (float-sized copy, see fix above)
    cudaMemcpy(h_gpu_res, d_gpu_res, sizeof(float), cudaMemcpyDeviceToHost);
    // BUG FIX: use CLOCKS_PER_SEC instead of a hard-coded 1e6 scale
    printf("GPU time: %f seconds\t", ((float)clock() - (float)time_begin) / (float)CLOCKS_PER_SEC);
    printf("GPU result: %f\n", h_gpu_res[0]);
    time_begin = clock();
    float cpu_res = secuential(host_array, size_array);
    printf("CPU time: %f seconds\t", ((float)clock() - (float)time_begin) / (float)CLOCKS_PER_SEC);
    printf("CPU result: %f\n", cpu_res);
    // deallocate memory
    free(host_array); free(h_gpu_res);
    cudaFree(device_array); cudaFree(d_gpu_res);
    return 0;
}
6,202 | #include <stdlib.h>
#include <stdio.h>
__global__ void addVectors(int *a, int *b, int *c, int n) {
    // Element-wise c[i] = a[i] + b[i] for i in [0, n).
    // GENERALIZED: the old index used only threadIdx.x, so the kernel was correct
    // solely for single-block launches. The global index below is identical for
    // <<<1, T>>> launches (blockIdx.x == 0) and also works for multi-block grids.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
int main() {
    // Adds two 10-element vectors on the GPU and prints the result.
    int size = 10;
    int *a = (int *) malloc(sizeof(int) * size);
    int *b = (int *) malloc(sizeof(int) * size);
    int *c = (int *) malloc(sizeof(int) * size);
    int *dev_a = NULL;
    int *dev_b = NULL;
    int *dev_c = NULL;
    for (int i = 0; i < size; i++) {
        a[i] = i;
        b[i] = i;
    }
    cudaMalloc(&dev_a, size * sizeof(int));
    cudaMalloc(&dev_b, size * sizeof(int));
    cudaMalloc(&dev_c, size * sizeof(int));
    cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: the old code also copied the uninitialized host buffer c to dev_c;
    // dev_c is write-only for the kernel, so that transfer was pointless.
    addVectors<<<1, 1024>>>(dev_a, dev_b, dev_c, size);
    // FIX: surface launch-configuration errors instead of silently printing garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    printf("Your result vector is: \n");
    for (int i = 0; i < size; i++)
        printf("c[%d] = %d\n", i, c[i]);
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
6,203 | #include "includes.h"
__device__ float length(float3 vec)
{
	// Euclidean norm of a 3-component vector.
	const float sq = vec.x * vec.x + vec.y * vec.y + vec.z * vec.z;
	return sqrt(sq);
}
__device__ float length4(float4 vec)
{
	// Norm of the xyz components only; vec.w is not included — presumably the
	// callers pack 3D velocities into a float4 (TODO(review): confirm).
	const float sq = vec.x * vec.x + vec.y * vec.y + vec.z * vec.z;
	return sqrt(sq);
}
// Averages particle speed per grid cell for one z-slice of a uniform grid.
// Cell layout: cellid addresses slice `slice` via gridDim.x*blockDim.x*slice,
// with one thread per cell inside the slice (threadIdx.x*gridDim.x + blockIdx.x).
// cellStartEnd[cellid] gives [start, end) into the (optionally reordered)
// particle arrays; indices maps sorted order back to particle ids when
// REORDER is not defined. Output velocities is indexed per-slice.
__global__ void SampleVelocitiesSlicedDev(float* velocities, const uint slice, const float4* vels_data, const uint2* cellStartEnd,const uint* indices)
{
const uint cellid = gridDim.x*blockDim.x*slice + threadIdx.x*gridDim.x + blockIdx.x;
uint2 cellStEnd = cellStartEnd[cellid];
// number of particles in this cell; uint, so "<= 0" below only catches == 0
// (NOTE(review): a cell with end < start would wrap around — assumed not to occur)
const uint part_in_cell = cellStEnd.y - cellStEnd.x;
if(part_in_cell <= 0)
{
velocities[threadIdx.x*gridDim.x + blockIdx.x] = 0;
return;
}
// accumulate the xyz velocity components of all particles in the cell
float4 vel,p = make_float4(0,0,0,0);
for(uint index = cellStEnd.x; index < cellStEnd.y; index++)
{
#ifndef REORDER
uint idx = indices[index];
vel = vels_data[idx];
#else
vel = vels_data[index];
#endif
p.x += vel.x;
p.y += vel.y;
p.z += vel.z;
}
// mean speed = |sum of velocities| / count (length4 ignores the w component)
velocities[threadIdx.x*gridDim.x + blockIdx.x] = length4(p) / part_in_cell;
}
6,204 | #include <stdio.h>
#include <string.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 256
__global__ void best_shuffle(const char *s, char *r, int *diff, int n);
__device__ void update_buf(int *cnt, char *buf);
__device__ int find_max(const char *s, int *cnt, int n);
char * get_input_word(int argc, char *argv[]);
/*
* Main
*/
// Driver: shuffles the input word on the GPU with best_shuffle and reports
// kernel vs. total (incl. transfer) timings measured with CUDA events.
// Event order matters: total_start -> H2D copies -> comp_start -> kernel ->
// comp_stop -> D2H copies -> total_stop.
int main(int argc, char *argv[]){
char *t = get_input_word(argc, argv);
printf("\nword: %s\n", t);
int n = strlen(t);
int blocks, *diff;
char *r;
float total_time, comp_time;
cudaEvent_t total_start, total_stop, comp_start, comp_stop;
cudaEventCreate(&total_start);
cudaEventCreate(&total_stop);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_stop);
/*
 * Memory allocation on host
 */
// r starts as a copy of t (strdup keeps the NUL; device copies move strlen bytes,
// so the terminator written by strdup stays intact for the final printf)
r = strdup(t);
diff = (int *)calloc(1, sizeof(int));
/*
 * Memory allocation on device
 */
char *t_dev, *r_dev;
int *diff_dev;
cudaMalloc(&r_dev, strlen(t)*sizeof(char));
cudaMalloc(&t_dev, strlen(t)*sizeof(char));
cudaMalloc(&diff_dev, 1*sizeof(int));
cudaEventRecord(total_start);
/*
 * Copy array from host memory to device memory
 */
// NOTE(review): diff_dev is never zeroed on the device before the kernel
// accumulates into it — relies on the kernel/driver state; verify.
cudaMemcpy(t_dev, t, strlen(t)*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(r_dev, t, strlen(t)*sizeof(char), cudaMemcpyHostToDevice);
cudaEventRecord(comp_start);
/*
 * Create sufficient blocks (ceil-div so the tail of the word is covered)
 */
blocks = (n + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
/*
 * Kernel call
 */
best_shuffle<<< blocks, THREADS_PER_BLOCK >>>(t_dev, r_dev, diff_dev, n);
cudaEventRecord(comp_stop);
cudaEventSynchronize(comp_stop);
cudaEventElapsedTime(&comp_time, comp_start, comp_stop);
/*
 * Copy result from device memory back to host memory
 */
cudaMemcpy(r, r_dev, strlen(t)*sizeof(char), cudaMemcpyDeviceToHost);
cudaMemcpy(diff, diff_dev, 1*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(total_stop);
cudaEventSynchronize(total_stop);
cudaEventElapsedTime(&total_time, total_start, total_stop);
/*
 * Free memory on device
 */
cudaFree(diff_dev);
cudaFree(r_dev);
cudaFree(t_dev);
cudaEventDestroy(comp_start);
cudaEventDestroy(comp_stop);
cudaEventDestroy(total_start);
cudaEventDestroy(total_stop);
/*
 * GPU timing (cudaEventElapsedTime reports milliseconds)
 */
printf("N: %d, blocks: %d, total_threads: %d\n", n, blocks, THREADS_PER_BLOCK*blocks);
printf("Total time (ms): %f\n", total_time);
printf("Kernel time (ms): %f\n", comp_time);
printf("Data transfer time (ms): %f\n", total_time-comp_time);
/*
 * Initial word, final word & the difference between them
 */
printf("\n%s %s (%d)\n", t, r, diff[0]);
free(r);
free(diff);
return 0;
}
/*
* Function: find_max
* --------------------
* Finds the letter with maximum frequency among the other characters of the string
*
* s: pointer of the char array (constant)
* cnt: int array, which has the characters' counter role
* n: number of characters of the word
*
*/
__device__ int find_max(const char *s, int *cnt, int n){
	// Histograms the characters of s into cnt[0..127] and returns the highest count.
	// BUG FIX: indexing cnt with (int)s[i] yields a NEGATIVE index on platforms
	// where char is signed and the byte is >= 128 (out-of-bounds write). Mask to
	// the 7-bit range the 128-entry table actually covers (ASCII input assumed).
	int i, max = 0;
	for (i = 0; i < n; ++i) {
		int c = s[i] & 0x7f;
		if (++cnt[c] > max) max = cnt[c];
	}
	return max;
}
/*
* Function: update_buf
* --------------------
* Updates buf array (char), according to counter array (cnt). Buf will be used in deterministic function
*
* cnt: int array, which has the characters' counter role
* buf: char array, the one we are updating
*
*/
__device__ void update_buf(int *cnt, char *buf){
	// Expands the 128-bin character histogram into a sorted character sequence:
	// code point c is emitted cnt[c] times. cnt is consumed (each entry ends at -1).
	int out = 0;
	for (int code = 0; code < 128; ++code)
		while (cnt[code]-- > 0)
			buf[out++] = code;
}
/*
* Function: best_shuffle
* --------------------
* Shuffles given string (char array) with the use of a deterministic function
*
* s: pointer of the char array (constant)
* r: pointer of a copy of char array (will contain final string)
* diff: pointer of int array, which will contain the difference between initial & final string
* n: number of characters of the word
*
*/
__global__ void best_shuffle(const char *s, char *r, int *diff, int n){
	// One thread per character: deterministically picks a replacement for r[idx]
	// by rotating within the sorted multiset of s's characters, then counts how
	// many positions still match the original (accumulated atomically in diff[0]).
	// Each thread rebuilds its own private cnt/buf, so the marking of buf entries
	// does not coordinate across threads — the mapping is deterministic per thread.
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx < n){
		int i, max, cnt[128] = {0};
		// NOTE(review): buf overflows for words longer than 256 characters — verify
		// callers bound the input length.
		char buf[256] = {0};
		max = find_max(s, cnt, n);
		update_buf(cnt, buf);
		for(i = 0; i < n; ++i){
			if (r[idx] == buf[i]) {
				// take the character `max` slots ahead (mod n); clear the marker bit
				r[idx] = buf[(i + max) % n] & ~128;
				buf[i] |= 128; // mark this slot used in the thread's private copy
				break;
			}
		}
		// BUG FIX: the old code did atomicAdd(&diff[0], diff[0] + (r[idx]==s[idx])),
		// re-adding the (racy, partially accumulated) counter into itself and
		// corrupting the mismatch count. Only the 0/1 match flag must be added.
		atomicAdd(&(diff[0]), (int)(r[idx] == s[idx]));
	}
}
/*
* Function: get_input_word
* --------------------
* Returns the input word the user entered (to be shuffled). If no word inserted, throws an error.
*
* argc: number of arguments
* argv: the actual arguments
*
*/
char * get_input_word(int argc, char *argv[]){
	// Returns the word to shuffle from the command line.
	// Exits with a usage message unless exactly one argument was given.
	if (argc == 2)
		return argv[1];
	printf("Usage: %s \"<input_word>\"", argv[0]);
	exit(1);
}
6,205 | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include<chrono>
int main(void) {
    // Approximates the Euler–Mascheroni constant as H(N-1) - ln(N) on the CPU
    // and prints the value and elapsed time (messages intentionally in Polish).
    // FIX: N was declared `double` (with a stray ';;') yet used as an int loop
    // bound, forcing an int-vs-double comparison every iteration.
    const int N = 1000000;
    double gammaEulera = 0.;
    auto start = std::chrono::high_resolution_clock::now();
    // partial harmonic sum H(N-1) = sum_{i=1}^{N-1} 1/i
    for (int i = 1; i < N; i++)
        gammaEulera = gammaEulera + (1. / (double)i);
    gammaEulera = gammaEulera - log((double)N);
    auto finish = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = finish - start;
    printf("Przyblizona wartosc eulera CPU wynosi %f \n", gammaEulera);
    printf("Czas wynosi %f", elapsed.count());
    return 0;
}
|
6,206 | #include <math.h>
#include <malloc.h>
#define ABS(a) (a>0?a:-(a))
#define MAX(a,b) (a>b?a:b)
#define MIN(a,b) (a<b?a:b)
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 16
const float eps=1e-8;
extern "C" void Atx_cone_mf_gpu_new(float *X,float *y,float *sc,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dy_det,float dz_det,float dz,int nx,int ny,int nz,int na,int nb);
// Returns the length of the intersection between the infinite line through
// (x1_0,y1_0)-(x2_0,y2_0) and the axis-aligned dx-by-dy rectangle centered at
// (x,y). Returns 0 when the line misses the rectangle.
// Method: intersect the line with the four rectangle edges (in coordinates
// recentered on the pixel), collect up to two hit points (xi,yi), and return
// their distance. The slope is taken along the dominant direction first to
// keep the division well-conditioned.
inline __device__ float find_l(float x1_0,float y1_0,float x2_0,float y2_0,float dx,float dy,float x,float y)
{
float l=0,dx2,dy2,a,b,slope,tmp,tmp2,xi[2],yi[2],x1,y1;
int i;
a=x2_0-x1_0;
b=y2_0-y1_0;
dx2=dx/2.0f;
dy2=dy/2.0f;
// vertical line: chord is the full pixel height if within half-width
if(a==0)
{
tmp=ABS(x1_0-x);
if(tmp<=dx2){l=dy;}
}
else
{
// horizontal line: chord is the full pixel width if within half-height
if(b==0)
{
tmp=ABS(y1_0-y);
if(tmp<=dy2)
{
l=dx;
}
}
else
{
// general case: recenter on the pixel and gather up to two edge hits
x1=x1_0-x;y1=y1_0-y;
i=0;
if(ABS(a)>ABS(b))
{
// mostly-horizontal ray: try the left/right edges first
slope=b/a;
tmp=slope*(-x1)+y1;
tmp2=slope*dx2;
if(ABS(tmp-tmp2)<=dy2)
{
xi[i]=-dx2;yi[i]=tmp-tmp2;i++;
}
if(ABS(tmp+tmp2)<=dy2)
{
xi[i]=dx2;yi[i]=tmp+tmp2;i++;
}
if(i<2)
{
// fewer than two hits: check the bottom/top edges
slope=a/b;
tmp=slope*(-y1)+x1;
tmp2=slope*dy2;
if(ABS(tmp-tmp2)<=dx2)
{
yi[i]=-dy2;xi[i]=tmp-tmp2;i++;
}
if(i<2)
{
if(ABS(tmp+tmp2)<=dx2)
{
yi[i]=dy2;xi[i]=tmp+tmp2;i++;
}
}
}
}
else
{
// mostly-vertical ray: try the bottom/top edges first
slope=a/b;
tmp=slope*(-y1)+x1;
tmp2=slope*dy2;
if(ABS(tmp-tmp2)<=dx2)
{
yi[i]=-dy2;xi[i]=tmp-tmp2;i++;
}
if(ABS(tmp+tmp2)<=dx2)
{
yi[i]=dy2;xi[i]=tmp+tmp2;i++;
}
if(i<2)
{
slope=b/a;
tmp=slope*(-x1)+y1;
tmp2=slope*dx2;
if(ABS(tmp-tmp2)<=dy2)
{
xi[i]=-dx2;yi[i]=tmp-tmp2;i++;
}
if(i<2)
{
if(ABS(tmp+tmp2)<=dy2)
{
xi[i]=dx2;yi[i]=tmp+tmp2;i++;
}
}
}
}
// two hit points found: the chord length is their Euclidean distance
if(i==2)
{ tmp=xi[1]-xi[0];tmp2=yi[1]-yi[0];
l=(float)sqrt(tmp*tmp+tmp2*tmp2);
}
}
}
return l;
}
// Returns the length of the intersection between the 3D line through
// (x1_0,y1_0,z1_0)-(x2_0,y2_0,z2_0) and the dx*dy*dz voxel centered at (x,y,z);
// 0 when the line misses the voxel. Degenerate directions (a==0 or b==0) fall
// back to the 2D case in the remaining plane.
inline __device__ float find_l_3d(float x1_0,float y1_0,float z1_0,float x2_0,float y2_0,float z2_0,float dx,float dy,float dz,float x,float y,float z)
// assuming c~=0
// A method for computing the intersecting length of a voxel with a infinitely-narrow beam
// A better formula will be supplied to improve the speed.
{
float l=0,dx2,dy2,dz2,a,b,c,slope,tmp[2],tmp2[2],tmpx,tmpy,tmpz,xi[2],yi[2],zi[2],x1,y1,z1;
int i;
a=x2_0-x1_0;b=y2_0-y1_0;c=z2_0-z1_0;
dx2=dx/2.0f;dy2=dy/2.0f;dz2=dz/2.0f;
if(a==0)
{l=find_l(y1_0,z1_0,y2_0,z2_0,dy,dz,y,z);}
else
{ if(b==0)
{l=find_l(x1_0,z1_0,x2_0,z2_0,dx,dz,x,z);}
else
{ x1=x1_0-x;y1=y1_0-y;z1=z1_0-z;
// x2=x2_0-x;y2=y2_0-y;z2=z2_0-z;
i=0;
// Same strategy as find_l: intersect the line with the voxel faces along the
// dominant direction first, collecting up to two entry/exit points (xi,yi,zi).
if(ABS(a)>ABS(b))
{ slope=b/a;tmp[0]=slope*(-x1)+y1;tmp2[0]=slope*dx2;
slope=c/a;tmp[1]=slope*(-x1)+z1;tmp2[1]=slope*dx2;
// x = -dx2 and x = +dx2 faces
if(ABS(tmp[0]-tmp2[0])<=dy2&&ABS(tmp[1]-tmp2[1])<=dz2)
{xi[i]=-dx2;yi[i]=tmp[0]-tmp2[0];zi[i]=tmp[1]-tmp2[1];i++;}
if(ABS(tmp[0]+tmp2[0])<=dy2&&ABS(tmp[1]+tmp2[1])<=dz2)
{xi[i]=dx2;yi[i]=tmp[0]+tmp2[0];zi[i]=tmp[1]+tmp2[1];i++;}
if(i<2)
{ slope=a/b;tmp[0]=slope*(-y1)+x1;tmp2[0]=slope*dy2;
slope=c/b;tmp[1]=slope*(-y1)+z1;tmp2[1]=slope*dy2;
// y = -dy2 and y = +dy2 faces
if(ABS(tmp[0]-tmp2[0])<=dx2&&ABS(tmp[1]-tmp2[1])<=dz2)
{xi[i]=tmp[0]-tmp2[0];yi[i]=-dy2;zi[i]=tmp[1]-tmp2[1];i++;}
if(i<2)
{ if(ABS(tmp[0]+tmp2[0])<=dx2&&ABS(tmp[1]+tmp2[1])<=dz2)
{xi[i]=tmp[0]+tmp2[0];yi[i]=dy2;zi[i]=tmp[1]+tmp2[1];i++;}
}
}
if(i<2)
{ slope=a/c;tmp[0]=slope*(-z1)+x1;tmp2[0]=slope*dz2;
slope=b/c;tmp[1]=slope*(-z1)+y1;tmp2[1]=slope*dz2;
// z = -dz2 and z = +dz2 faces
if(ABS(tmp[0]-tmp2[0])<=dx2&&ABS(tmp[1]-tmp2[1])<=dy2)
{xi[i]=tmp[0]-tmp2[0];yi[i]=tmp[1]-tmp2[1];zi[i]=-dz2;i++;}
if(i<2)
{ if(ABS(tmp[0]+tmp2[0])<=dx2&&ABS(tmp[1]+tmp2[1])<=dy2)
{xi[i]=tmp[0]+tmp2[0];yi[i]=tmp[1]+tmp2[1];zi[i]=dz2;i++;}
}
}
}
else
{ slope=a/b;tmp[0]=slope*(-y1)+x1;tmp2[0]=slope*dy2;
slope=c/b;tmp[1]=slope*(-y1)+z1;tmp2[1]=slope*dy2;
// mostly along y: y faces first, then x faces, then z faces
if(ABS(tmp[0]-tmp2[0])<=dx2&&ABS(tmp[1]-tmp2[1])<=dz2)
{xi[i]=tmp[0]-tmp2[0];yi[i]=-dy2;zi[i]=tmp[1]-tmp2[1];i++;}
if(ABS(tmp[0]+tmp2[0])<=dx2&&ABS(tmp[1]+tmp2[1])<=dz2)
{xi[i]=tmp[0]+tmp2[0];yi[i]=dy2;zi[i]=tmp[1]+tmp2[1];i++;}
if(i<2)
{ slope=b/a;tmp[0]=slope*(-x1)+y1;tmp2[0]=slope*dx2;
slope=c/a;tmp[1]=slope*(-x1)+z1;tmp2[1]=slope*dx2;
if(ABS(tmp[0]-tmp2[0])<=dy2&&ABS(tmp[1]-tmp2[1])<=dz2)
{xi[i]=-dx2;yi[i]=tmp[0]-tmp2[0];zi[i]=tmp[1]-tmp2[1];i++;}
if(i<2)
{ if(ABS(tmp[0]+tmp2[0])<=dy2&&ABS(tmp[1]+tmp2[1])<=dz2)
{xi[i]=dx2;yi[i]=tmp[0]+tmp2[0];zi[i]=tmp[1]+tmp2[1];i++;}
}
}
if(i<2)
{ slope=a/c;tmp[0]=slope*(-z1)+x1;tmp2[0]=slope*dz2;
slope=b/c;tmp[1]=slope*(-z1)+y1;tmp2[1]=slope*dz2;
if(ABS(tmp[0]-tmp2[0])<=dx2&&ABS(tmp[1]-tmp2[1])<=dy2)
{xi[i]=tmp[0]-tmp2[0];yi[i]=tmp[1]-tmp2[1];zi[i]=-dz2;i++;}
if(i<2)
{ if(ABS(tmp[0]+tmp2[0])<=dx2&&ABS(tmp[1]+tmp2[1])<=dy2)
{xi[i]=tmp[0]+tmp2[0];yi[i]=tmp[1]+tmp2[1];zi[i]=dz2;i++;}
}
}
}
// two face hits found: the chord length is their 3D distance
if(i==2)
{ tmpx=xi[1]-xi[0];tmpy=yi[1]-yi[0];tmpz=zi[1]-zi[0];
l=(float)sqrt(tmpx*tmpx+tmpy*tmpy+tmpz*tmpz);
}
}
}
return l;
}
// Backprojection (A^T * y) for cone-beam geometry, one thread per voxel.
// Grid/block layout: 2D blocks of BLOCK_SIZE_x*BLOCK_SIZE_y; ix spans the x axis,
// iy2 spans the flattened (y,z) axes. For each voxel the kernel determines the
// detector window [na_min,na_max] x [nb_min,nb_max] of rays that can cross it,
// computes each ray's exact intersection length with find_l_3d, and accumulates
// the weighted projection value into x[idx] and the weight into sc[idx].
// x and sc must be zeroed before launch (host calls set2zero first);
// scale is unused here — the host applies it afterwards with scalex.
__global__ void Atx_cone_mf_gpu_new_kernel(float *x,float *y,float *sc,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dy_det,float dz_det,float dz,int nx,int ny,int nz,int na,int nb)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int ix=bx*BLOCK_SIZE_x+tx;
int iy2=by*BLOCK_SIZE_y+ty;
if(ix<nx&&iy2<ny*nz)
{
int nx2,ny2,nz2,na2,nb2,ia,ib,iy,iz,na_min,na_max,nb_min,nb_max,idx;
float xc,yc,zc,xr,yr,SD,l,tmp,x1,y1,z1,x2,y2,z2,d;
SD=SO+OD;
na2=na/2;nb2=nb/2;
nx2=nx/2;ny2=ny/2;nz2=nz/2;
// half-diagonal of a voxel footprint, used to widen the detector-row window
d=(float)sqrt((1+dz*dz)/2);
// unflatten iy2 into (iy, iz) and form the linear voxel index
iz=(int)floor((float)iy2/(float)ny);
iy=iy2-iz*ny;
idx=iz*ny*nx+iy*nx+ix;
// voxel center in volume coordinates (centered grids, unit xy spacing, dz in z)
zc=(float)(iz+0.5-nz2)*dz;
yc=(float)(iy+0.5-ny2);
xc=(float)(ix+0.5-nx2);
// rotate the voxel center into the source-detector frame
xr=cos_phi*xc+sin_phi*yc;
yr=-sin_phi*xc+cos_phi*yc;
// project the voxel extent onto the detector to bound the ray search window
tmp=SD/((xr+SO)*dy_det);
na_max=(int)floor((yr+1)*tmp+na2);
na_min=(int)floor((yr-1)*tmp+na2);
tmp=SD/((xr+SO)*dz_det);
nb_max=(int)floor((zc+d)*tmp+nb2);
nb_min=(int)floor((zc-d)*tmp+nb2);
for(ib=MAX(0,nb_min);ib<=MIN(nb_max,nb-1);ib++)
{
for(ia=MAX(0,na_min);ia<=MIN(na_max,na-1);ia++)
{
// ray endpoints: source position and detector element (ia, ib)
x1=cos_phi*(-SO);
y1=sin_phi*(-SO);
z1=0.0;
x2=cos_phi*OD-sin_phi*y_det[ia];
y2=sin_phi*OD+cos_phi*y_det[ia];
z2=z_det[ib];
l=find_l_3d(x1,y1,z1,x2,y2,z2,1.0,1.0,dz,xc,yc,zc);
// no atomics needed: each thread owns exactly one voxel (idx)
x[idx]+=l*y[ib*na+ia];
sc[idx]+=l;
}
}
}
}
__global__ void set2zero(float *x,int nx,int nyz)
{
// Zero-fills an nx-by-nyz array; one element per thread on a 2D grid of
// BLOCK_SIZE_x * BLOCK_SIZE_y blocks, with bounds guards for the ragged edge.
int col = blockIdx.x * BLOCK_SIZE_x + threadIdx.x;
int row = blockIdx.y * BLOCK_SIZE_y + threadIdx.y;
if (col < nx && row < nyz)
x[row * nx + col] = 0;
}
__global__ void scalex(float *x,int nx,int nyz,float scale)
{
// Multiplies every element of an nx-by-nyz array by scale; one element per
// thread on a 2D grid, with bounds guards for the ragged edge.
int col = blockIdx.x * BLOCK_SIZE_x + threadIdx.x;
int row = blockIdx.y * BLOCK_SIZE_y + threadIdx.y;
if (col < nx && row < nyz)
x[row * nx + col] *= scale;
}
// Host wrapper for the cone-beam backprojection A^T * y.
// Uploads the projection data (y) and detector coordinates, zeroes the output
// accumulators on the device, runs the backprojection kernel, applies the
// scale factor, and downloads the volume (X) and the weight image (sc).
extern "C" void Atx_cone_mf_gpu_new(float *X,float *y,float *sc,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dy_det,float dz_det,float dz,int nx,int ny,int nz,int na,int nb)
{
float *x_d,*y_d,*sc_d,*y_det_d,*z_det_d;
int nd,N;
N=nx*ny*nz;    // voxels in the volume
nd=na*nb;      // detector elements per view
cudaMalloc(&y_d,nd*sizeof(float));
cudaMalloc(&x_d,N*sizeof(float));
cudaMalloc(&sc_d,N*sizeof(float));
cudaMalloc(&y_det_d,na*sizeof(float));cudaMemcpy(y_det_d,y_det,na*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc(&z_det_d,nb*sizeof(float));cudaMemcpy(z_det_d,z_det,nb*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(y_d,y,nd*sizeof(float),cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid_t((nx+dimBlock.x-1)/dimBlock.x,(ny*nz+dimBlock.y-1)/dimBlock.y);
// the kernel accumulates, so the outputs must start at zero
set2zero<<<dimGrid_t, dimBlock>>>(x_d,nx,ny*nz);
set2zero<<<dimGrid_t, dimBlock>>>(sc_d,nx,ny*nz);
Atx_cone_mf_gpu_new_kernel<<<dimGrid_t, dimBlock>>>(x_d,y_d,sc_d,cos_phi,sin_phi,y_det_d,z_det_d,
SO,OD,scale,dy_det,dz_det,dz,nx,ny,nz,na,nb);
// FIX: cudaThreadSynchronize() has been deprecated for years; use the
// equivalent cudaDeviceSynchronize().
cudaDeviceSynchronize();
scalex<<<dimGrid_t, dimBlock>>>(x_d,nx,ny*nz,scale);
scalex<<<dimGrid_t, dimBlock>>>(sc_d,nx,ny*nz,scale);
cudaMemcpy(X,x_d,N*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(sc,sc_d,N*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(x_d);cudaFree(y_d);cudaFree(sc_d);cudaFree(y_det_d);cudaFree(z_det_d);
}
extern "C" void Ax_cone_mf_gpu_new(float *X,float *y,float *sr,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int na,int nb);
// Forward projection (A * x) for cone-beam geometry, one thread per detector
// element (ia, ib). The ray from the source to detector element (ia, ib) is
// traced through the volume by stepping along its dominant axis (x when
// |x1-x2| > |y1-y2|, else y). For every unit step, the entry/exit coordinates
// in the two transverse axes are computed; the ray crosses at most two cells in
// each transverse axis per step, and the intersection length l is split between
// the crossed voxels using the fractional crossings ry/rx and rz. The numbered
// comments (11..44) enumerate the boundary cases: same-cell vs. straddling vs.
// entering/leaving the volume in each transverse axis.
// Accumulates line integral into y[id] and total intersection length into
// sr[id]; both are scaled by `scale` at the end. No atomics needed: each thread
// owns exactly one detector element.
// NOTE (from the author): assumes z1-z2 is small per step (shallow cone angle).
__global__ void Ax_cone_mf_gpu_kernel_new(float *x,float *y,float *sr,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int na,int nb)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx0=threadIdx.x;
int ty0=threadIdx.y;
int ia=bx*BLOCK_SIZE_x+tx0;
int ib=by*BLOCK_SIZE_y+ty0;
if(ia<na&&ib<nb)
{
int nx2,ny2,nz2,id,ix,iy,iz,cx1,cx2,cy1,cy2,cz1,cz2;
float x1,y1,x2,y2,z1,z2,xx1,yy1,zz1,xx2,yy2,zz2,slope1,slope2,l,d,tmp,rx,ry,rz;
nx2=nx/2;
ny2=ny/2;
nz2=nz/2;
id=ib*na+ia;
// ray endpoints: source position and detector element (ia, ib), rotated by phi
x1=cos_phi*(-SO);
y1=sin_phi*(-SO);
z1=0.0;
x2=cos_phi*OD-sin_phi*y_det[ia];
y2=sin_phi*OD+cos_phi*y_det[ia];
z2=z_det[ib];
y[id]=0;
sr[id]=0;
// assuming z1-z2 is small
if(ABS(x1-x2)>ABS(y1-y2))
{ // march along x: for each x-slab compute the y and z spans crossed
slope1=(y2-y1)/(x2-x1);
slope2=(z2-z1)/(x2-x1);
for(ix=0;ix<nx;ix++)
{ xx1=(float)(ix-nx2);xx2=xx1+1;
// order (yy1, yy2) so yy1 <= yy2 regardless of ray direction
if(slope1>=0)
{ yy1=y1+slope1*(xx1-x1)+ny2;
yy2=y1+slope1*(xx2-x1)+ny2;
}
else
{ yy1=y1+slope1*(xx2-x1)+ny2;
yy2=y1+slope1*(xx1-x1)+ny2;
}
cy1=(int)floor(yy1);
cy2=(int)floor(yy2);
if(slope2>=0)
{ zz1=(z1+slope2*(xx1-x1))/dz+nz2;
zz2=(z1+slope2*(xx2-x1))/dz+nz2;
}
else
{ zz1=(z1+slope2*(xx2-x1))/dz+nz2;
zz2=(z1+slope2*(xx1-x1))/dz+nz2;
}
cz1=(int)floor(zz1);
cz2=(int)floor(zz2);
// case 1x: step stays within a single y-cell
if(cy2==cy1)
{ if(cy1>=0&&cy1<=ny-1)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 11
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
iy=cy1;iz=cz1;y[id]+=l*x[iz*ny*nx+iy*nx+ix];sr[id]+=l;
}
}
else
{ if(cz2>0&&cz2<nz)// 12
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
iy=cy1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
else
{ if(cz2==0)// 13
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
if(cz2==nz)// 14
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
}
}
}
}
// case 2x: step straddles two interior y-cells
else
{ if(cy2>0&&cy2<ny)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 21
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
iy=cy2;iz=cz1;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 22
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(ry-rz)*l;
iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-ry)*l;
iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
else
{ if(cz2==0)// 23
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(ry-rz)*l;
iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
else
{ iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
if(cz2==nz)// 24
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-ry)*l;
}
}
}
}
}
// case 3x/4x: the step enters (cy2==0) or leaves (cy2==ny) the y range
else
{ if(cy2==0)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 31
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy2;iz=cz1;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 32
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
else
{ iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-ry)*l;
iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
else
{ if(cz2==0)// 33
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy2;iz=cz2;y[id]+=(1-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-ry)*l;
}
else
{ iy=cy2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
if(cz2==nz)// 34
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{
}
else
{ iy=cy2;iz=cz1;y[id]+=(rz-ry)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-ry)*l;
}
}
}
}
}
if(cy2==ny)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 41
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;
iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 42
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(ry-rz)*l;
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
}
}
else
{ if(cz2==0)// 43
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz2;y[id]+=(ry-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(ry-rz)*l;
}
else
{
}
}
if(cz2==nz)// 44
{ d=yy2-yy1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ry=(cy2-yy1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(ry>rz)
{ iy=cy1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
else
{ iy=cy1;iz=cz1;y[id]+=ry*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=ry*l;
}
}
}
}
}
}
}
}
else
{ // march along y: mirror of the x-major branch with x and y roles swapped
slope1=(x2-x1)/(y2-y1);
slope2=(z2-z1)/(y2-y1);
for(iy=0;iy<ny;iy++)
{ yy1=(float)(iy-ny2);yy2=yy1+1;
if(slope1>=0)
{ xx1=x1+slope1*(yy1-y1)+nx2;
xx2=x1+slope1*(yy2-y1)+nx2;
}
else
{ xx1=x1+slope1*(yy2-y1)+nx2;
xx2=x1+slope1*(yy1-y1)+nx2;
}
cx1=(int)floor(xx1);
cx2=(int)floor(xx2);
if(slope2>=0)
{ zz1=(z1+slope2*(yy1-y1))/dz+nz2;
zz2=(z1+slope2*(yy2-y1))/dz+nz2;
}
else
{ zz1=(z1+slope2*(yy2-y1))/dz+nz2;
zz2=(z1+slope2*(yy1-y1))/dz+nz2;
}
cz1=(int)floor(zz1);
cz2=(int)floor(zz2);
if(cx2==cx1)
{ if(cx1>=0&&cx1<=nx-1)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 11
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
ix=cx1;iz=cz1;y[id]+=l*x[iz*ny*nx+iy*nx+ix];sr[id]+=l;
}
}
else
{ if(cz2>0&&cz2<nz)// 12
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
ix=cx1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
else
{ if(cz2==0)// 13
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
if(cz2==nz)// 14
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rz=(cz2-zz1)/(zz2-zz1);
ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
}
}
}
}
else
{ if(cx2>0&&cx2<nx)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 21
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
ix=cx2;iz=cz1;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 22
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rx-rz)*l;
ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-rx)*l;
ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
else
{ if(cz2==0)// 23
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rx-rz)*l;
ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
else
{ ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
if(cz2==nz)// 24
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-rx)*l;
}
}
}
}
}
else
{ if(cx2==0)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 31
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx2;iz=cz1;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 32
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
else
{ ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-rx)*l;
ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
else
{ if(cz2==0)// 33
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx2;iz=cz2;y[id]+=(1-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rx)*l;
}
else
{ ix=cx2;iz=cz2;y[id]+=(1-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(1-rz)*l;
}
}
if(cz2==nz)// 34
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{
}
else
{ ix=cx2;iz=cz1;y[id]+=(rz-rx)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rz-rx)*l;
}
}
}
}
}
if(cx2==nx)
{ if(cz2==cz1)
{ if(cz1>=0&&cz1<=nz-1)// 41
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;
ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
}
}
else
{ if(cz2>0&&cz2<nz)// 42
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rx-rz)*l;
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
}
}
else
{ if(cz2==0)// 43
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz2;y[id]+=(rx-rz)*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=(rx-rz)*l;
}
else
{
}
}
if(cz2==nz)// 44
{ d=xx2-xx1;tmp=(x1-x2)*(x1-x2)+(y1-y2)*(y1-y2);l=(float)sqrt((d*d+1)*(tmp+(z1-z2)*(z1-z2))/tmp);
rx=(cx2-xx1)/d;rz=(cz2-zz1)/(zz2-zz1);
if(rx>rz)
{ ix=cx1;iz=cz1;y[id]+=rz*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rz*l;
}
else
{ ix=cx1;iz=cz1;y[id]+=rx*l*x[iz*ny*nx+iy*nx+ix];sr[id]+=rx*l;
}
}
}
}
}
}
}
}
// apply the global scale factor once, after the full accumulation
y[id]*=scale;sr[id]*=scale;
}
}
// Host wrapper for the cone-beam forward projection A * x.
// Uploads the volume (X) and detector coordinates, launches one thread per
// detector element, and downloads the projection (y) and the per-ray total
// intersection length (sr).
extern "C" void Ax_cone_mf_gpu_new(float *X,float *y,float *sr,float cos_phi,float sin_phi,float *y_det,float *z_det,
float SO,float OD,float scale,float dz,int nx,int ny,int nz,int na,int nb)
{
float *y_d,*x_d,*sr_d,*y_det_d,*z_det_d;
int nd,N;
N=nx*ny*nz;    // voxels in the volume
nd=na*nb;      // detector elements per view
cudaMalloc((void**)&y_d,nd*sizeof(float));
cudaMalloc((void**)&x_d,N*sizeof(float));cudaMemcpy(x_d,X,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc((void**)&sr_d,nd*sizeof(float));
cudaMalloc((void**)&y_det_d,na*sizeof(float));cudaMemcpy(y_det_d,y_det,na*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc((void**)&z_det_d,nb*sizeof(float));cudaMemcpy(z_det_d,z_det,nb*sizeof(float),cudaMemcpyHostToDevice);
// y_d/sr_d need no memset: the kernel writes y[id]=0 and sr[id]=0 first
dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y);
dim3 dimGrid_t((na+dimBlock.x-1)/dimBlock.x,(nb+dimBlock.y-1)/dimBlock.y);
Ax_cone_mf_gpu_kernel_new<<<dimGrid_t, dimBlock>>>(x_d,y_d,sr_d,cos_phi,sin_phi,y_det_d,z_det_d,
SO,OD,scale,dz,nx,ny,nz,na,nb);
// FIX: cudaThreadSynchronize() has been deprecated for years; use the
// equivalent cudaDeviceSynchronize().
cudaDeviceSynchronize();
cudaMemcpy(y,y_d,na*nb*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(sr,sr_d,na*nb*sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(y_d);cudaFree(x_d);cudaFree(y_det_d);cudaFree(z_det_d);cudaFree(sr_d);
}
|
6,207 | __global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
// FDTD E-field update over a flattened Nx*Ny*Nz grid; one thread per cell.
// CEx/CEy/CEz are per-cell precomputed update coefficients.
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
// Recover (i, j, k) from the flat index; k varies fastest.
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
// Interior cells only; 'i < Nx' also rejects threads past the end of the grid.
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nz] - Hz[idx] - Hy[idx+1] + Hy[idx] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( Hx[idx+1] - Hx[idx] - Hz[idx+Nyz] + Hz[idx] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - Hy[idx] - Hx[idx+Nz] + Hx[idx] );
}
}
// FDTD H-field update, companion to update_e; one thread per cell,
// fixed 0.5 update factor. Interior cells only.
__global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
// Recover (i, j, k) from the flat index; k varies fastest.
int i = idx/Nyz;
int j = ( idx - i*Nyz )/Nz;
int k = idx - i*Nyz - j*Nz;
if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
// 0.5f instead of 0.5: the double literal forced every multiply into
// double precision inside an otherwise all-float kernel.
Hx[idx] -= 0.5f*( Ez[idx] - Ez[idx-Nz] - Ey[idx] + Ey[idx-1] );
Hy[idx] -= 0.5f*( Ex[idx] - Ex[idx-1] - Ez[idx] + Ez[idx-Nyz] );
Hz[idx] -= 0.5f*( Ey[idx] - Ey[idx-Nyz] - Ex[idx] + Ex[idx-Nz] );
}
}
|
6,208 | /*
* Copyright 2021 Roman Klassen
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
*/
#include "hash_algs.cuh"
__constant__ unsigned int Crc32Table[256] =
{
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
};
__device__
// Standard reflected CRC-32 (polynomial 0xEDB88320) over len bytes of buf,
// using the 256-entry lookup table above.
unsigned int CRC32(char *buf, unsigned int len)
{
unsigned int crc = 0xFFFFFFFF;
// Counter matches len's type: the original 'int i' overflowed (UB) before
// reaching a len greater than INT_MAX.
for (unsigned int i = 0; i < len; i++)
crc = (crc >> 8) ^ Crc32Table[(crc ^ buf[i]) & 0xFF];
return crc ^ 0xFFFFFFFF;
}
__device__
// MurmurHash2 (Austin Appleby's 32-bit variant) of len bytes of buf,
// mixed with the given seed.
unsigned int MurmurHash2 (char * buf, int len, unsigned int seed )
{
// 'm' and 'r' are mixing constants generated offline.
// They're not really 'magic', they just happen to work well.
const unsigned int m = 0x5bd1e995;
const int r = 24;
// Initialize the hash to a 'random' value
unsigned int h = seed ^ len;
// Mix 4 bytes at a time into the hash
const unsigned char * data = (const unsigned char *)buf;
while(len >= 4)
{
// NOTE(review): this word load assumes 4-byte alignment of buf (or a
// target that tolerates unaligned reads) — confirm for callers.
unsigned int k = *(unsigned int*)data;
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
len -= 4;
}
// Handle the last few bytes of the input array.
// The case fallthrough here is intentional (classic MurmurHash tail).
switch(len)
{
case 3: h ^= data[2] << 16;
case 2: h ^= data[1] << 8;
case 1: h ^= data[0];
h *= m;
};
// Do a few final mixes of the hash to ensure the last few
// bytes are well-incorporated.
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
__device__
// Convenience wrapper: MurmurHash2 with a fixed seed (0xc58f1a7a).
unsigned int MurMurHash(char *buf, unsigned int len)
{
return MurmurHash2(buf, len, 0xc58f1a7a);
} |
6,209 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <curand_kernel.h>
#define D 5
#define BLOCKS 125
#define THREADS 25
#define N 5
// Each thread evaluates exp(-|X|^2) at one point of the D-dimensional
// N^D lattice addressed by its global thread id, scaled by the cell
// volume (1/N)^5, and stores its contribution in res[tid].
__global__ void simpson_int(double *res) {
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
double X[D];
// Decode the thread id as D base-N digits -> lattice coordinates.
unsigned int rem = tid;
for (int j = 0; j < D; j++) {
X[j] = rem % N;
rem /= N;
}
// Map coordinates into [0,1) and accumulate -sum(x_j^2).
double T = 0.0;
for (int j = 0; j < D; j++) {
X[j] = X[j] / N;
T -= X[j] * X[j];
}
res[tid] = exp(T) * pow(1.0/N, 5.0);
}
int main(int argc, char **argv) {
// Host driver: launch one thread per lattice point, sum the per-thread
// contributions on the CPU, and report wall-clock and CPU time.
double host[BLOCKS * THREADS];
double *dev;
double integral = 0.0;
double vol = 1.0;
clock_t ts = clock();
struct timeval start, end;
gettimeofday(&start, NULL);
cudaMalloc((void**) &dev, BLOCKS * THREADS * sizeof(double));
simpson_int<<<BLOCKS, THREADS>>>(dev);
// Blocking copy: also synchronizes with the kernel above.
cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(double),
cudaMemcpyDeviceToHost);
for(int i = 0; i < BLOCKS * THREADS; i++) {
integral += host[i];
}
// The domain volume is 1 in each dimension, so this loop is a no-op
// kept as a placeholder for non-unit domains.
for (int j = 0; j < D; j++) {
vol *= 1.0;
}
integral *= vol;
gettimeofday(&end, NULL);
double elapsed = ((end.tv_sec - start.tv_sec) * 1000000u +
end.tv_usec - start.tv_usec) / 1.e6;
ts = clock() - ts;
printf("%ld clocks (%lf seconds)\n", ts, elapsed);
printf("integral is: %lf\n", integral);
cudaFree(dev);
}
|
6,210 | #include "includes.h"
// In-place tree reduction of one block's slice of global memory; the block
// sum is written to g_odata[blockIdx.x].
// Preconditions: blockDim.x is a power of two <= 1024 and n is a multiple of
// blockDim.x — the unguarded idata[tid + k] accesses assume each block owns a
// full slice.
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Under the precondition, either every thread of a block returns here or
// none does, so the barriers below never diverge within a block.
if (idx >= n) return;
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// Final warp: 'volatile' alone no longer guarantees lane synchrony under
// independent thread scheduling (Volta+), so each step is followed by an
// explicit __syncwarp().
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32]; __syncwarp();
vsmem[tid] += vsmem[tid + 16]; __syncwarp();
vsmem[tid] += vsmem[tid + 8];  __syncwarp();
vsmem[tid] += vsmem[tid + 4];  __syncwarp();
vsmem[tid] += vsmem[tid + 2];  __syncwarp();
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
} |
6,211 | __global__ void calculate_inner_grid(double* grid_0, double* grid_1, double* grid_2, int bx, int by, int bz){
    // Leapfrog update of the interior of a (bx+2)x(by+2)x(bz+2) halo-padded grid.
    // Fixes vs. original: total size used (bz + 2), not the (bz * 2) typo;
    // i/j/k are decomposed from idx, not from the constant N; the halo guards
    // use '||' (the original '&&' form was always false, so they never fired);
    // and no grid element is read before the bounds guards.
    int N = (bx + 2) * (by + 2) * (bz + 2);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;  // grid overshoot
    int i = idx % (bx + 2);
    if (i < 2 || i >= bx) return;
    int j = idx / (bx + 2) % (by + 2);
    if (j < 2 || j >= by) return;
    int k = idx / ((bx + 2) * (by + 2));
    if (k < 2 || k >= bz) return;
    grid_2[idx] = 2 * grid_1[idx] - grid_0[idx];
}
// Initialize grid_0 with the analytic solution sampled at each interior point
// of this MPI-style block's halo-padded (bx+2)x(by+2)x(bz+2) grid.
// Fixes vs. original: total size uses (bz + 2), not (bz * 2); i/j/k come from
// idx, not the constant N; guards use '||' (the '&&' form was always false);
// and nothing is read out of bounds before the guards.
__global__ void first_step(double* grid_0, double* grid_1, \
                           int bx, int by, int bz, \
                           double hx, double hy, double hz, \
                           double block_x_len, double block_y_len, double block_z_len, \
                           int Lx, int Ly, int Lz, \
                           int Nx, int Ny, int Nz, \
                           int nx, int ny, int nz, \
                           int block_pos_x, int block_pos_y, int block_pos_z,
                           double at, double t){
    int N = (bx + 2) * (by + 2) * (bz + 2);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;  // grid overshoot
    int i = idx % (bx + 2);
    if (i < 1 || i > bx) return;
    int j = idx / (bx + 2) % (by + 2);
    if (j < 1 || j > by) return;
    int k = idx / ((bx + 2) * (by + 2));
    if (k < 1 || k > bz) return;
    // Global physical coordinates of this local (i,j,k): block offset plus a
    // correction for blocks that absorbed a remainder row/column/slab.
    double x = (i - 1) * hx + block_pos_x * block_x_len + min(Nx % nx, block_pos_x) * hx;
    double y = (j - 1) * hy + block_pos_y * block_y_len + min(Ny % ny, block_pos_y) * hy;
    double z = (k - 1) * hz + block_pos_z * block_z_len + min(Nz % nz, block_pos_z) * hz;
    grid_0[idx] = sin(3.14 / Lx * x) * sin(3.14 / Ly * y) * sin(2 * 3.14 / Lz * z) * cos(at * t + 2 * 3.14);
}
|
6,212 |
/*
This function takes the set of points (xj,yj) defining a closed curve
and populates the signed distance function Phi.
The time for this should be of order Nx*Ny*points.
Each of the Nx*Ny grid point independently loops through all points to determine
its minDist from curve and if it is located inside or outside of the closed curve.
*/
#include <stdio.h>
#include <math.h>
// One thread per grid cell: computes the winding-number sign and the minimum
// distance from (x, y) to the polyline, writing the signed distance into phi.
__global__ void SetPhi(float* phi, int Nx, int Ny, int pitch, int points, float * fx, float * fy,
float Xmin, float Ymin, float dx, float dy)
{
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = index_x * pitch + index_y;
// row/col recover index_x/index_y (valid while index_y < pitch).
int row = idx/pitch;
int col = idx%pitch;
float sign = 1;
float area = 0;
float minDist = 1000;
float x = Xmin + col * dx;
float y = Ymin + row * dy;
for (int i = 0; i < points ; ++i)
{
// Sign-determine part.
float fx_i = fx[i];
float fy_i = fy[i];
// NOTE(review): fx[i+1]/fy[i+1] read one past the last point when
// i == points-1 — assumes the arrays carry points+1 entries with the
// first vertex repeated to close the curve; confirm with callers.
float fx_ipp = fx[i+1];
float fy_ipp = fy[i+1];
// Accumulate the signed angle subtended by segment i (winding number).
float cross = (fx_i - x) * (fy_ipp - y) - (fy_i - y) * (fx_ipp - x);
float dot = (fx_i - x) * (fx_ipp - x) + (fy_i - y) * (fy_ipp - y);
area += atan2(cross, dot);
// Distance-determine part: project (x,y) onto the segment.
float top = (x - fx_ipp) * (fx_i - fx_ipp) + (y - fy_ipp) * (fy_i - fy_ipp);
float bot = (fx_i - fx_ipp) * (fx_i - fx_ipp) + (fy_i - fy_ipp) * (fy_i - fy_ipp);
float ratio = top / bot;
// NOTE(review): despite the name, distSq holds the distance itself —
// hypotf returns sqrt(a^2 + b^2), not the square.
float distSq = 0;
if (ratio > 1) {
// Closest to endpoint i.
float xdiff = x - fx_i;
float ydiff = y - fy_i;
distSq = hypotf(xdiff, ydiff);
} else if (ratio < 0) {
// Closest to endpoint i+1.
float xdiff = x - fx_ipp;
float ydiff = y - fy_ipp;
distSq = hypotf(xdiff, ydiff);
} else {
// Closest to the interior projection point.
float ux = (1 - ratio) * fx_ipp + ratio * fx_i;
float uy = (1 - ratio) * fy_ipp + ratio * fy_i;
ux = x - ux;
uy = y - uy;
distSq = hypotf(ux, uy);
}
if (distSq < minDist) {
minDist = distSq;
}
}
// Total angle ~ +-2*pi inside the curve, ~ 0 outside; 0.1 is the cutoff.
if (area < 0.1) { sign = -1; }
float xx = sign * (minDist);
if (col<Ny && row<Nx ){
phi[idx] = xx ;
// Debug trace: one line per cell (expensive; serialized device printf).
printf("%d \t [%d][%d]=(%3.3f,%3.3f) \t %3.4f \t %3.3f \n",idx, row, col, x,y, minDist, area);
}
}
|
6,213 | /*
compile the program as:
nvcc -arch sm_75 hello.cu -o hello
其中sm_后面的数字随着显卡架构不同而不同
75对应的是Turing架构
*/
#include <stdio.h>
// Exactly one thread of the launch (thread 5) prints a greeting.
__global__ void helloFromGPU()
{
    const bool isChosenThread = (threadIdx.x == 5);
    if (isChosenThread) {
        printf("Hello World from GPU !\n");
    }
}
int main()
{
// Print from the host, then launch 10 GPU threads (only thread 5 prints).
printf("Hello World from CPU !\n");
helloFromGPU <<<1, 10>>>();
// cudaDeviceReset() flushes the device printf buffer before exit.
cudaDeviceReset();
// cudaDeviceSynchronize();
return 0;
}
|
6,214 | #include <iostream>
#include <chrono>
#include <cuda_profiler_api.h>
// Scaled accumulate: dax[i] += dbx[i] * dt for every i in [0, n).
__global__ void parallel_for(const int n, double* dax, double* dbx,
const double dt) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n) return;  // tail guard
    dax[gid] += dbx[gid] * dt;
}
int main()
{
// Micro-benchmark: time Nl launches of parallel_for over N doubles and
// report the average per-launch wall time.
const int Nl = 1000;
const double dt=0.001;
const int N = 1000000;
int blockSize = 64;
int numBlocks = (N + blockSize -1) / blockSize;  // ceil-divide
double* dax;
double* dbx;
// NOTE(review): the device buffers are never initialized — fine for a pure
// timing run, but the values computed are garbage.
cudaMalloc((void**)&dax, sizeof(double)*N);
cudaMalloc((void**)&dbx, sizeof(double)*N);
//warm up
for(int j=0; j<100; j++)
{
parallel_for<<<numBlocks, blockSize>>>(N, dax, dbx, dt);
}
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::duration<float> fsec;
// Drain the warm-up launches before starting the clock.
cudaDeviceSynchronize();
auto start_clock = Time::now();
cudaProfilerStart();
for(int j=0; j<Nl; j++)
{
parallel_for<<<numBlocks, blockSize>>>(N, dax, dbx, dt);
}
// Launches are asynchronous; sync so the timer covers the real work.
cudaDeviceSynchronize();
cudaProfilerStop();
auto finish_clock = Time::now();
fsec fs = finish_clock - start_clock;
std::cout << "time taken for cuda parallel for (msecs):" << fs.count()*1e3/Nl << std::endl;
cudaFree(dax);
cudaFree(dbx);
return 0;
}
|
6,215 | #include <iostream>
#include <cuda.h>
#include <stdio.h>
using namespace std;
#define N 20
// Elementwise c = a + b; designed for a <<<N, 1>>> launch (one block per element).
__global__ void addition(int *a, int *b, int *c)
{
    const int idx = blockIdx.x;
    if (idx >= N) return;
    c[idx] = a[idx] + b[idx];
}
int main()
{
// Vector addition demo: c[i] = a[i] + b[i] with a[i] = -i, b[i] = i*i.
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
int size = N*sizeof(int);
int i;
cudaError_t err;
// Allocate the three device vectors, reporting (but not aborting on) failure.
err = cudaMalloc((void**)&dev_a, size);
if(err != cudaSuccess){
cout<<"Error1 \n";
}
err = cudaMalloc((void**)&dev_b, size);
if(err != cudaSuccess){
cout<<"Error2 \n";
}
err = cudaMalloc((void**)&dev_c, size);
if(err != cudaSuccess){
cout<<"Error3 \n";
}
for (i=0; i<N; i++){
a[i] = -i;
b[i] = i*i;
}
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// One block per element.
addition<<<N,1>>>(dev_a, dev_b, dev_c);
// Blocking copy also synchronizes with the kernel.
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for (i=0; i<N; i++){
cout<<a[i]<<" + "<<b[i]<<" = "<<c[i]<<"\n";
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
6,216 | #include "includes.h"
// Matrix sum C = A + B where each thread processes TWO columns: its own
// column ix and the column one whole grid-width (nxthreads) to the right.
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
// Total threads along x; also the stride to the second element.
unsigned int nxthreads = gridDim.x * blockDim.x;
unsigned int iy = blockIdx.y;  // one block row per matrix row
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ix2 = ix + nxthreads;
unsigned int idx = iy * nx + ix;
unsigned int idx2 = iy * nx + ix2;
if (iy < ny)
{
// Each element guarded separately: the second may fall off the row end.
if (ix < nx)
MatC[idx] = MatA[idx] + MatB[idx];
if (ix2 < nx)
MatC[idx2] = MatA[idx2] + MatB[idx2];
}
} |
6,217 | #include "includes.h"
// Grid-stride fill: set every element of data[0..size) to value.
__global__ void arrayFill(float* data, float value, int size) {
    const int step = gridDim.x * blockDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < size; idx += step) {
        data[idx] = value;
    }
} |
6,218 | #include <stdio.h>
#include <cmath>
#include "Cuda/PBKDF2.cu"
#define ERRCHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Error-check helper used by the ERRCHECK macro: prints the CUDA error with
// file/line and, when abort is set, exits with the error code.
// NOTE(review): the 'wait' parameter is currently unused.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true,bool wait=true) {
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define SHA256_DIGESTSIZE 32
#define SHA256_BLOCKSIZE 64
// Grid-stride loop: each thread derives SHA-256-based keys for a slice of the
// n output slots, writing 32 bytes (SHA256_DIGESTSIZE) per slot into out.
__global__ void PBKDF2Kernel(int n, unsigned char *out) {
// Fixed test passphrase; sizeof-1 excludes the trailing NUL.
unsigned char passwd[22] = "governor washout beak";
int pwd_len = sizeof(passwd) - 1;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<n; i += blockDim.x * gridDim.x) {
// Defined in Cuda/PBKDF2.cu (not visible here).
cuda_derive_key_sha256(passwd, pwd_len, &out[SHA256_DIGESTSIZE*i]);
}
}
int main() {
// Driver: query the device, pick a launch configuration via the occupancy
// calculator, run PBKDF2Kernel over N slots of managed memory, and hex-dump
// the derived keys.
cudaError_t err;
int device = 0;
int numSMs;
const int N = 1;
cudaDeviceProp props;
err = cudaGetDeviceProperties(&props, device);
if(err) { return -1; }
printf("%s (%2d)\n", props.name, props.multiProcessorCount);
cudaSetDevice(device);
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, device);
cudaError_t error;
/* Stop eating CPU while waiting for results! */
error = cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
if (error != cudaSuccess) {
fprintf(stderr, "Could not set blocking sync (error %d)\n", error);
}
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Times only the occupancy query itself, not the kernel.
cudaEventRecord(start, 0);
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, PBKDF2Kernel, 0, N);
// Round up according to array size
gridSize = (N + blockSize - 1) / blockSize;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Occupancy calculator elapsed time: %3.3f ms \n", time);
unsigned char *finalDestination;
// Managed memory: readable from host after the sync below.
cudaMallocManaged((void **)&finalDestination, N*SHA256_DIGESTSIZE);
ERRCHECK(cudaGetLastError());
printf("SMs: %d, X: %d, Y: %d\n", numSMs, 32*numSMs, 512);
printf("grid: %d, min grid: %d, block: %d\n", gridSize, minGridSize, blockSize);
//PBKDF2Kernel<<<1,1>>>(N, 100000, d_hash);
PBKDF2Kernel<<<gridSize,blockSize>>>(N, finalDestination);
ERRCHECK(cudaDeviceSynchronize());
ERRCHECK(cudaGetLastError());
// Hex-dump each derived key, one line per slot.
for(int i = 0; i < N; i++) {
for(int j = 0; j < SHA256_DIGESTSIZE; j++) {
printf("%02x ", finalDestination[i*SHA256_DIGESTSIZE+j]);
}
printf("\n");
}
cudaFree(finalDestination);
cudaDeviceReset();
return 0;
}
|
6,219 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
// NOTE(review): despite the name, this computes y = a * x (a scale/copy),
// not y += a * x — confirm the intended semantics with callers.
// One thread per element; assumes a single block covering the vector.
template <class scalar_t>
__global__ void axpy (scalar_t a, scalar_t *x, scalar_t *y)
{
y[threadIdx.x] = a * x[threadIdx.x];
}
// Host launcher: one block of N threads, one thread per element.
// NOTE(review): N must not exceed the device's max threads per block.
template <class scalar_t>
void run_it (scalar_t a, scalar_t *x, scalar_t *y, size_t N)
{
axpy<scalar_t><<<1, N>>> (a, x, y);
}
template void run_it (float a, float *x, float *y, size_t N);
template void run_it (double a, double *x, double *y, size_t N);
|
6,220 | #include "includes.h"
// Elementwise C = A + B over N entries; one thread per element.
__global__ void vector_add(double const *A_dev, double const *B_dev, double *C_dev, int const N) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) {
        return;  // tail guard for the last partial block
    }
    C_dev[idx] = A_dev[idx] + B_dev[idx];
} |
6,221 | #include <cuda.h>
#include <stdio.h>
#define N 16
// Tipo de los datos del algoritmo
typedef int data_t;
// Prototipos
// Element initializers passed to init_matrix: M[i][j] = i+j or i-j.
data_t add(const data_t a, const data_t b) { return a + b; }
data_t sub(const data_t a, const data_t b) { return a - b; }
void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t));
void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes);
__global__ void kernel_operations(data_t * A, data_t * B);
// Host function
int
main(int argc, char** argv)
{
//const int N = (argc == 2) ? atoi(argv[1]) : 0;
// N is the compile-time macro (16), so this guard can never fire; it is a
// leftover from the commented-out command-line version above.
if (!N){
printf("Parametros incorrectos. El programa se cierra\n");
return -1;
}
// On the CPU...
// ...allocate the N x N matrices
const unsigned int n_bytes = sizeof(data_t)*N*N;
data_t *host_A = (data_t*) malloc(n_bytes);
data_t *host_B = (data_t*) malloc(n_bytes);
// ...initialize them: A[i][j] = i+j, B[i][j] = i-j
init_matrix(host_A, N, &add);
init_matrix(host_B, N, &sub);
run_GPU(host_A, host_B, n_bytes);
free(host_A);
free(host_B);
return 0;
}
void
run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes)
{
// Copies A and B to the device, runs kernel_operations (A = (A-B)^2),
// copies both back, and prints the resulting A.
data_t *gpu_A, *gpu_B;
int i, j;
// Allocate device memory
cudaMalloc((void**)&gpu_A, n_bytes);
cudaMalloc((void**)&gpu_B, n_bytes);
// NOTE(review): looks like a debug leftover — overwrites A[0][0] before the
// upload; confirm it is intentional.
host_A[0] = 1;
// Copy inputs from host to device
cudaMemcpy(gpu_A, host_A, n_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_B, host_B, n_bytes, cudaMemcpyHostToDevice);
// Launch configuration: 16 blocks x 16 threads = N*N elements
dim3 dimGrid(16); // one block per word
dim3 dimBlock(16); // one thread per character
// Invoke the kernel
kernel_operations<<< dimGrid, dimBlock >>>(gpu_A, gpu_B);
// Copy results back to the host (blocking, so it also synchronizes)
cudaMemcpy(host_A, gpu_A, n_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(host_B, gpu_B, n_bytes, cudaMemcpyDeviceToHost);
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
printf("%8d ",host_A[i*N+j]);
}
printf("\n");
}
// Free device memory
cudaFree(gpu_A);
cudaFree(gpu_B);
}
// El kernel que ejecutara en cada hilo de la GPU
__global__ void kernel_operations(data_t *A, data_t *B){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
A[global_id] = (A[global_id]-B[global_id])*(A[global_id]-B[global_id]);
//A[global_id] = 2;
}
// Funcion para la inicializacion de las matrices
// Fill a size x size matrix with M[i][j] = init_op(i, j).
void
init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t))
{
    for (unsigned int row = 0; row < size; ++row) {
        for (unsigned int col = 0; col < size; ++col) {
            M[row*size + col] = init_op(row, col);
        }
    }
}
|
6,222 | /**
*Developed By Karan Bhagat
*March 2017
**/
#include <stdio.h>
#include <stdlib.h>
//cuda kernel for multiplying two matrices without tiling
// Naive (untiled) matrix multiply c = a * b, all matrices row-major:
// a is a_rows x a_columns, b is a_columns x b_columns, c is a_rows x b_columns.
__global__ void matrix_mul_kernel(int* a, int* b, int* c, int a_rows, int a_columns, int b_columns)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads that fall outside the output matrix do nothing.
    if (row >= a_rows || col >= b_columns)
        return;
    // Dot product of row of a with column of b.
    int acc = 0;
    for (int k = 0; k < a_columns; ++k)
        acc += a[row * a_columns + k] * b[k * b_columns + col];
    c[row * b_columns + col] = acc;
}
void build_matrix(FILE *file, int* mat, int rows, int columns);
int main(int argc, char **argv)
{
//check for filenames and matrices' dimensions
if (argc != 6)
{
printf("Usage : ./matrix_mul_tiling <fileA> <fileB> <A_rows> <A_columns> <B_columns>");
exit(1);
}
char* fileA_name = argv[1];//matrix A filename
char* fileB_name = argv[2];//matrix B filename
// a_columns can also be perceived as b_rows
int a_rows, a_columns, b_columns;
//read matrix A and B's dimensions
sscanf(argv[3], "%d", &a_rows);
sscanf(argv[4], "%d", &a_columns);
sscanf(argv[5], "%d", &b_columns);
FILE *fileA = fopen(fileA_name, "r");
FILE *fileB = fopen(fileB_name, "r");
// Fail early instead of letting build_matrix fscanf a NULL stream.
if (fileA == NULL || fileB == NULL)
{
printf("Could not open input files\n");
exit(1);
}
//declare host and device matrices pointers
int* mat_a;
int* mat_b;
int* mat_c;
int* d_mat_a;
int* d_mat_b;
int* d_mat_c;
//allocate memory for host matrices
mat_a = (int*)malloc(a_rows * a_columns * sizeof(int));
mat_b = (int*)malloc(a_columns * b_columns * sizeof(int));
mat_c = (int*)malloc(a_rows * b_columns * sizeof(int));
int i, j;
build_matrix(fileA, mat_a, a_rows, a_columns);
build_matrix(fileB, mat_b, a_columns, b_columns);
fclose(fileA);
fclose(fileB);
//declare dimensions for the grid and block
dim3 dimBlock(2,2);
// Integer ceil-divide: the original (int)ceil(b_columns/2) truncated
// BEFORE ceil() could act, losing the last row/column for odd dimensions.
dim3 dimGrid((b_columns + 1) / 2, (a_rows + 1) / 2);
const size_t size_a = a_rows * a_columns * sizeof(int);
const size_t size_b = a_columns * b_columns * sizeof(int);
const size_t size_c = a_rows * b_columns * sizeof(int);
//allocate matrices memory on device
cudaMalloc((void **)&d_mat_a, size_a);
cudaMalloc((void **)&d_mat_b, size_b);
cudaMalloc((void **)&d_mat_c, size_c);
//copy A and B matrices from host to device
cudaMemcpy(d_mat_a, mat_a, size_a, cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_b, mat_b, size_b, cudaMemcpyHostToDevice);
//execute cuda kernel
matrix_mul_kernel<<<dimGrid, dimBlock>>>(d_mat_a, d_mat_b, d_mat_c, a_rows, a_columns, b_columns);
//copy the computed matrix C from device to host (blocking => also syncs)
cudaMemcpy(mat_c, d_mat_c, size_c, cudaMemcpyDeviceToHost);
//free cuda memory
cudaFree(d_mat_a);
cudaFree(d_mat_b);
cudaFree(d_mat_c);
//print the resulting matrix
for (i = 0; i < a_rows; i++)
{
for (j = 0; j < b_columns; j++)
{
printf("%d ", mat_c[i * b_columns + j]);
}
printf("\n");
}
//release host matrices (the original leaked all three)
free(mat_a);
free(mat_b);
free(mat_c);
return 0;
}
//build matrix from the file
// Read rows*columns whitespace-separated integers from file into mat (row-major).
void build_matrix(FILE *file, int* mat, int rows, int columns)
{
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < columns; ++c)
        {
            fscanf(file, "%d", &mat[r * columns + c]);
        }
    }
} |
6,223 | template<typename T>
// Sums each column of a row-major rows x cols matrix: one thread per column,
// result[col] = sum over all rows of matrix[row*cols + col].
__device__ void sumRows(const T* matrix, T* result,
                        const int rows, const int cols) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= cols) {
        return;
    }
    T acc = 0;
    #pragma unroll
    for (int r = 0; r < rows; ++r) {
        acc += matrix[r * cols + col];
    }
    result[col] = acc;
} |
6,224 | #include "includes.h"
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
}
// Elementwise exponential: b[i] = exp(a[i]) for i in [0, lengthA).
__global__ void expkernel(const int lengthA, const double *a, double *b)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= lengthA) {
        return;  // tail guard
    }
    b[gid] = exp(a[gid]);
} |
6,225 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
typedef struct edge {
int first, second;
} edges;
// Accumulate the L2-regularization term into the gradient:
// grads_W1[i][j] += W1[i][j] * reg, with row i = blockIdx.x, col j = threadIdx.x.
// NOTE(review): no bounds guard — assumes the launch shape exactly matches
// the matrix dimensions (rows blocks x 'size' threads); verify at call site.
__global__ void grads_w1_kernel(double * grads_W1,double * W1,double reg, int size)
{
int i = blockIdx.x;
int j = threadIdx.x;
grads_W1[i*size + j] += W1[i*size + j] * reg;
} |
6,226 | //Example CUDA code, written and commented by Jose Monsalve
//Taken from CUDA C/C++ Basics
//Supercomputing 2011 Tutorial
//by NVIDIA
/**
This code executes c=a+b in a single thread in a GPU device.
It is a really simple code that is intended to show the memory
movement between host and device, but not the division of the
work between the differetn GPU threads.
First, we define the kernel (function that is executed in the
GPU device), second, we initialize the values in the host (CPU)
and we allocate (reserve) some memory in the GPU). Then we move
these values to the GPU device through explicitely memory movement
and we start the execution of the add kernel. Finally, after the
computation is done, we copy the information back from the GPU and
display it
**/
#include <stdio.h>
#include <stdlib.h>
//Simple add kernel, this function will be executed in the GPU device.
//C=A+B
// Demo kernel using dynamic shared memory (size supplied as the 3rd launch
// argument). Each thread writes its own index into shared memory, copies it
// into both a and b, then adds: the net effect is c[i] = 2*i regardless of
// the contents of the input buffers.
__global__ void add(int *a, int *b, int *c)
{
extern __shared__ int shared_mem[];
int * shmem=shared_mem;
shmem[threadIdx.x]=threadIdx.x;
a[threadIdx.x]=shmem[threadIdx.x];
b[threadIdx.x]=shmem[threadIdx.x];
c[threadIdx.x]=a[threadIdx.x]+b[threadIdx.x];
}
//main func
int main(void)
{
//Initializing the host variables
//The *d_X are pointers that are not accessible in the host
//directly but they represent a way to refer to the data in the
//GPU device
int a[10], b[10], c[10];
int *d_a, *d_b, *d_c;
int size = sizeof(int)*10;
//Initial values in the host
//Initializing memory in the GPU device,
//reserving the space, but there is not value yet (requires explicit movement)
cudaMalloc ( (void **) &d_a, size );
cudaMalloc ( (void **) &d_b, size );
cudaMalloc ( (void **) &d_c, size );
//moving the values that we just initialize in the host
//to the GPU device (Explicit memory movement)
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
printf("Adding %d and %d in the device ... \n",a,b);
//starting the kernel function
add<<<1,10,10*sizeof(int)>>>(d_a,d_b,d_c);
//Bringing back the result, which is stored in the GPU device
//And needs to be manually obtained.
cudaMemcpy(c,d_c,size, cudaMemcpyDeviceToHost);
printf("Result is \n");
//printing the result
for ( int i = 0; i < 10 ; i++)
printf("%d\t",c[i]);
printf("\n");
//cleaning device.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
6,227 | #include "user_host.cuh"
__host__
void host_maxValueVector(float *vec, int vector_size, float *p_ret_val) {
float maxVal = FLOAT_MIN_VAL;
for (int i = 0; i < vector_size; i++) {
maxVal = (maxVal < vec[i]) ? vec[i] : maxVal;
}
*p_ret_val = maxVal;
}
|
6,228 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// 24-bit RGB color.
struct COLOR {
    uint8_t R;
    uint8_t G;
    uint8_t B;
};
// Stream the channels as decimal numbers separated by spaces. The unary '+'
// promotes uint8_t to int: streaming uint8_t directly prints raw characters
// (e.g. {255,0,0} emitted control bytes, not "255 0 0").
std::ostream &operator<<(std::ostream &os, COLOR const &m) {
    return os << +m.R << " " << +m.G << " " << +m.B;
}
int main(void)
{
    // Fill a host std::vector and a thrust::device_vector with RED, mirror the
    // device data into a host_vector, and print all three.
    const int height = 1;
    const int width = 2;
    COLOR RED = {255, 0, 0};
    std::cout << RED << std::endl;
    std::vector<COLOR> A(height*width);
    std::fill(A.begin(), A.end(), RED);
    for(int i = 0; i < A.size(); i++)
        std::cout << "A[" << i << "] = " << A[i] << std::endl;
    thrust::device_vector<COLOR> D(height*width);
    thrust::fill(D.begin(), D.end(), RED);
    thrust::host_vector<COLOR> H(height*width);
    // Copy device -> host so H receives the filled values. The original copied
    // the default-constructed H over D, clobbering the fill it just did.
    thrust::copy(D.begin(), D.end(), H.begin());
    // print D
    for(int i = 0; i < D.size(); i++)
        std::cout << "D[" << i << "] = " << D[i] << std::endl;
    for(int i = 0; i < H.size(); i++)
        std::cout << "H[" << i << "] = " << H[i] << std::endl;
}
|
6,229 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
int main( int argc, char* argv[] )
{
// Minimal allocate/free demo: reserve one device vector and release it.
// Size of vectors
int n = 10;
// Device input vectors
double *d_a;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
// Free memory
cudaFree(d_a);
return 0;
}
|
6,230 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Single-thread scalar add with an extra by-value term: *c = *a + *b + tmp.
// The printf lines are debug traces.
__global__ void add(int *a, int *b, int *c, int tmp) {
    const int lhs = *a;
    const int rhs = *b;
    *c = lhs + rhs + tmp;
    printf("add\n");
    printf("%d %d\n", lhs, tmp);
}
int main() {
// Upload two scalars, run add<<<1,1>>> with an extra literal term (4),
// and read back c = a + b + 4.
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a = 0;
b = 1;
c = 2;
printf("[before]%d %d %d\n", a, b, c);
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add<<< 1, 1 >>>(d_a, d_b, d_c, 4);
// Blocking copy: also synchronizes with the kernel.
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("[after]%d %d %d\n", a, b, c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
6,231 | #include "includes.h"
// Set every weight wt[0..N) to the constant alpha.
__global__ void kernel_setweights(int N, double *wt, double alpha){
    const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
    /* only the first N threads write */
    if (gid >= (unsigned int)N) {
        return;
    }
    wt[gid] = alpha;
} |
6,232 | #include <iostream>
#include <algorithm>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE 16
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Backing function for HANDLE_ERROR: report any CUDA failure with its
// source location and terminate the process.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess) {
        return;
    }
    printf("%s in %s at line %d\n", cudaGetErrorString( err ), file, line);
    exit(EXIT_FAILURE);
}
// Elementwise c = a + b over n entries; one thread per element.
__global__ void add(int *a, int *b, int *c, int n) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n) return;
    c[gid] = a[gid] + b[gid];
}
// Print a row-major r x c matrix, two decimal places per entry, one row per line.
void print_mat(const char * name, int r, int c, double *m){
    printf("Printing %s\n", name);
    for (int row = 0; row < r; ++row) {
        for (int col = 0; col < c; ++col) {
            printf("%.2lf ", m[row * c + col]);
        }
        printf("\n");
    }
}
/*
Leaky RELU
*/
/*
Leaky RELU (in place): negative entries are scaled by 0.1.
One thread per element of an n x k plane; blockIdx.z selects the channel.
*/
__global__ void gpu_l_relu(double *res, int n, int k, int channel){
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= n || c >= k) return;
    const int idx = blockIdx.z * n * k + r * k + c;
    if (res[idx] < 0.0) {
        res[idx] *= 0.1;
    }
}
extern "C" {
// Host wrapper: copies res (n x k x channel doubles) to the device, applies
// the in-place leaky ReLU kernel, and copies the result back.
void leaky_relu(double *res, int n, int k, int channel){
// Allocate memory space on the device
double *dev_res;
cudaMalloc((void **) &dev_res, sizeof(double)*n*k*channel);
// copy the input tensor from host to device memory
cudaMemcpy(dev_res, res, sizeof(double)*n*k*channel, cudaMemcpyHostToDevice);
// Ceil-divide the plane into BLOCK_SIZE x BLOCK_SIZE tiles; z = channel.
unsigned int gridev_rows = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int gridev_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int channels = channel;
dim3 dimGrid(gridev_cols, gridev_rows, channels);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Launch kernel
gpu_l_relu<<<dimGrid, dimBlock>>>(dev_res, n, k, channel);
// Transfer results from device to host (blocking copy: already synchronizes)
cudaMemcpy(res, dev_res, sizeof(double)*n*k*channel, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// free memory
cudaFree(dev_res);
}
}
/*
Batch Norm
*/
/* Per-channel affine normalization, in place:
 *   res = gamma[c] * (res - mean[c]) / variance[c]
 * Launched with 2D blocks over (col, row) and gridDim.z == channel.
 * NOTE(review): the divisor is variance[] used directly -- the commented-out
 * sqrt(variance + epsilon) suggests callers pass a precomputed standard
 * deviation; confirm against the caller. */
__global__ void gpu_b_norm (double *res, double *mean, double *gamma,
double *variance, int n, int k, int channel){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockIdx.z * n * k;
if( col < k && row < n) {
int index = offset + row * k + col;
// double divisor = sqrt(variance[blockIdx.z] + epsilon);
double divident = gamma[blockIdx.z] * (res[index] - mean[blockIdx.z]);
res[index] = divident / variance[blockIdx.z];
}
}
extern "C" {
void batch_norm(double *res, double *mean, double *gamma,
double *variance, int n, int k, int channel){
    // Host wrapper around gpu_b_norm: normalizes res (n x k x channel) in
    // place using per-channel mean/gamma/variance arrays of length `channel`.
    double *dev_res, *dev_mean, *dev_gamma, *dev_variance;
    cudaMalloc((void **) &dev_res, sizeof(double)*n*k*channel);
    cudaMalloc((void **) &dev_mean, sizeof(double)*channel);
    cudaMalloc((void **) &dev_gamma, sizeof(double)*channel);
    cudaMalloc((void **) &dev_variance, sizeof(double)*channel);
    // Copy inputs host -> device.
    cudaMemcpy(dev_res, res, sizeof(double)*n*k*channel, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_mean, mean, sizeof(double)*channel, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_gamma, gamma, sizeof(double)*channel, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_variance, variance, sizeof(double)*channel, cudaMemcpyHostToDevice);
    unsigned int gridev_rows = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int gridev_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int channels = channel;
    dim3 dimGrid(gridev_cols, gridev_rows, channels);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    gpu_b_norm<<<dimGrid, dimBlock>>>(dev_res, dev_mean, dev_gamma, dev_variance,
    n, k, channel);
    cudaMemcpy(res, dev_res, sizeof(double)*n*k*channel, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // BUG FIX: dev_mean, dev_gamma and dev_variance were previously leaked;
    // release every device allocation made above.
    cudaFree(dev_res);
    cudaFree(dev_mean);
    cudaFree(dev_gamma);
    cudaFree(dev_variance);
}
}
/*
Add Bias
*/
// Adds the per-channel bias to every element of an n x k x channel tensor.
// gridDim.z selects the channel; bias holds one value per channel.
__global__ void gpu_add_bias (double *res, double *bias, int n, int k, int channel){
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < n && c < k) {
        const int plane = blockIdx.z * n * k;
        res[plane + r * k + c] += bias[blockIdx.z];
    }
}
extern "C" {
void add_bias(double * C, double * bias, int n, int k, int channel){
    // Host wrapper: adds bias[c] to every element of channel c of C
    // (n x k x channel) on the device, then copies the result back.
    double *dev_b, *dev_c;
    cudaMalloc((void **) &dev_b, sizeof(double) * channel);
    cudaMalloc((void **) &dev_c, sizeof(double) * n * k * channel);
    cudaMemcpy(dev_b, bias, sizeof(double) * channel, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, C, sizeof(double) * n * k * channel, cudaMemcpyHostToDevice);
    // 2D tiles over (col, row), one grid layer per channel.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (unsigned int) channel);
    gpu_add_bias<<<dimGrid, dimBlock>>>(dev_c, dev_b, n, k, channel);
    cudaMemcpy(C, dev_c, sizeof(double) * n * k * channel, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(dev_b);
    cudaFree(dev_c);
}
}
/*
MAX Pool
*/
// Column-wise max pooling: for output element `index`, scan the `size`
// candidates at cols[index + i*n] and keep the largest. One thread per output.
__global__ void gpu_max (double *res, double *cols, int n, int size){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
    {
        // BUG FIX: the original seeded the running max with the sentinel
        // -10000.0, which returns a wrong result whenever every candidate is
        // below -10000. Seed with the first candidate instead so any value
        // range works (assumes size >= 1).
        double max = cols[index];
        for (int i = 1; i < size; i++) {
            double v = cols[index + i*n];
            if (v > max) {
                max = v;
            }
        }
        res[index] = max;
    }
}
extern "C" {
void maxpool(double * C, double * cols, int n, int size){
    // Host wrapper for gpu_max: C[i] = max over j of cols[i + j*n].
    double *dev_cols, *dev_c;
    cudaMalloc((void **) &dev_cols, sizeof(double) * n * size);
    cudaMalloc((void **) &dev_c, sizeof(double) * n);
    cudaMemcpy(dev_cols, cols, sizeof(double) * n * size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, C, sizeof(double) * n, cudaMemcpyHostToDevice);
    // 1D launch: 1024 threads per block, rounded up to cover n outputs.
    const int threads = 1024;
    gpu_max<<<(n + threads - 1) / threads, threads>>>(dev_c, dev_cols, n, size);
    cudaMemcpy(C, dev_c, sizeof(double) * n, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(dev_cols);
    cudaFree(dev_c);
}
}
/*
Convolution
*/
// Naive dense matrix multiply: c (m x k) = a (m x n) * b (n x k), row-major.
// One thread computes one output element.
__global__ void gpu_multABtoC(double *a,double *b, double *c, int m, int n, int k)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= m || col >= k)
        return;
    double acc = 0.0;
    for (int i = 0; i < n; i++)
        acc += a[r * n + i] * b[i * k + col];
    c[r * k + col] = acc;
}
extern "C"{
void conv2d(double *C, double *A, double *B, int m, int n, int k)
{
    // Convolution lowered to a GEMM: C (m x k) = A (m x n) * B (n x k),
    // all matrices row-major on the host.
    double *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **) &dev_a, sizeof(double) * m * n);
    cudaMalloc((void **) &dev_b, sizeof(double) * n * k);
    cudaMalloc((void **) &dev_c, sizeof(double) * m * k);
    cudaMemcpy(dev_a, A, sizeof(double) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, B, sizeof(double) * n * k, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, C, sizeof(double) * m * k, cudaMemcpyHostToDevice);
    // 2D tiles over (col, row) of the output.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
    gpu_multABtoC<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, m, n, k);
    cudaMemcpy(C, dev_c, sizeof(double) * m * k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
} |
6,233 | #include <thrust/gather.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
//#include <cuda.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <cmath>
#include "math.h"
extern "C" { //required to avoid name mangling in PyCUDA: https://devtalk.nvidia.com/default/topic/471412/pycuda-thrust-example-in-case-someone-is-curious/
// In-place ascending sort of a raw device array of doubles.
void thrust_sort_double(double* input_ptr, int length)
{
    thrust::device_ptr<double> first(input_ptr);
    thrust::sort(first, first + length);
}
// Sorts key_ptr ascending and applies the same permutation to val_ptr.
void thrust_sort_by_key_double(double* key_ptr, int length, double* val_ptr)
{
    thrust::device_ptr<double> keys(key_ptr);
    thrust::device_ptr<double> vals(val_ptr);
    thrust::sort_by_key(keys, keys + length, vals);
}
// Sorts input_ptr ascending and records the permutation applied
// (the original index of each element) in perm_ptr.
void thrust_get_sort_perm_double(double* input_ptr, int length, int* perm_ptr)
{
    thrust::device_ptr<double> keys(input_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::sequence(perm, perm + length);   // 0,1,2,... before sorting
    thrust::sort_by_key(keys, keys + length, perm);
}
// long-keyed variant of thrust_get_sort_perm_double.
void thrust_get_sort_perm_long(long* input_ptr, int length, int* perm_ptr)
{
    thrust::device_ptr<long> keys(input_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::sequence(perm, perm + length);
    thrust::sort_by_key(keys, keys + length, perm);
}
// int-keyed variant of thrust_get_sort_perm_double.
void thrust_get_sort_perm_int(int* input_ptr, int length, int* perm_ptr)
{
    thrust::device_ptr<int> keys(input_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::sequence(perm, perm + length);
    thrust::sort_by_key(keys, keys + length, perm);
}
// Applies a precomputed permutation: output[i] = input[perm[i]].
void thrust_apply_sort_perm_double(double* input_ptr, int length, double* output_ptr, int* perm_ptr)
{
    thrust::device_ptr<double> src(input_ptr);
    thrust::device_ptr<double> dst(output_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::gather(perm, perm + length, src, dst);
}
// long variant of thrust_apply_sort_perm_double.
void thrust_apply_sort_perm_long(long* input_ptr, int length, long* output_ptr, int* perm_ptr)
{
    thrust::device_ptr<long> src(input_ptr);
    thrust::device_ptr<long> dst(output_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::gather(perm, perm + length, src, dst);
}
// int variant of thrust_apply_sort_perm_double.
void thrust_apply_sort_perm_int(int* input_ptr, int length, int* output_ptr, int* perm_ptr)
{
    thrust::device_ptr<int> src(input_ptr);
    thrust::device_ptr<int> dst(output_ptr);
    thrust::device_ptr<int> perm(perm_ptr);
    thrust::gather(perm, perm + length, src, dst);
}
// Vectorized lower_bound: for each query in bounds_ptr, writes the first
// position in the sorted array where it could be inserted in order.
void thrust_lower_bound_int(int* sorted_ptr, int sorted_length, int* bounds_ptr, int bounds_length, int* output_ptr)
{
    thrust::device_ptr<int> sorted(sorted_ptr);
    thrust::device_ptr<int> queries(bounds_ptr);
    thrust::device_ptr<int> out(output_ptr);
    thrust::lower_bound(sorted, sorted + sorted_length,
                        queries, queries + bounds_length, out);
}
// Vectorized upper_bound: for each query in bounds_ptr, writes the last
// position in the sorted array where it could be inserted in order.
void thrust_upper_bound_int(int* sorted_ptr, int sorted_length, int* bounds_ptr, int bounds_length, int* output_ptr)
{
    thrust::device_ptr<int> sorted(sorted_ptr);
    thrust::device_ptr<int> queries(bounds_ptr);
    thrust::device_ptr<int> out(output_ptr);
    thrust::upper_bound(sorted, sorted + sorted_length,
                        queries, queries + bounds_length, out);
}
// Inclusive prefix sum: sum_ptr[i] = data[0] + ... + data[i].
void thrust_cumsum_double(double* data_ptr, int length, double* sum_ptr)
{
    thrust::device_ptr<double> in(data_ptr);
    thrust::device_ptr<double> out(sum_ptr);
    thrust::inclusive_scan(in, in + length, out);
}
// int variant of thrust_cumsum_double (inclusive prefix sum).
void thrust_cumsum_int(int* data_ptr, int length, int* sum_ptr)
{
    thrust::device_ptr<int> in(data_ptr);
    thrust::device_ptr<int> out(sum_ptr);
    thrust::inclusive_scan(in, in + length, out);
}
// ---------------- slice statistics using thrust ---------------- //
// StackOverflow inspired:
// http://stackoverflow.com/questions/12380966/standard-deviation-using-cuda
typedef double T;
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
struct summary_stats_data
{
T n;      // number of samples aggregated so far
T mean;   // running mean of those samples
T M2;     // sum of squared deviations from the mean
// initialize to the identity element
void initialize()
{
n = mean = M2 = 0;
}
// Sample variance (Bessel-corrected, divides by n-1).
__host__ __device__
T variance() { return M2 / (n - 1); }
// Population variance (divides by n).
__host__ __device__
T variance_n() { return M2 / n; }
};
// stats_unary_op is a functor that takes in a value x and
// returns a summary_stats_data whose mean value is initialized to x.
// Lifts a single sample x into a summary: one observation, mean x, zero M2.
struct summary_stats_unary_op
{
    __host__ __device__
    summary_stats_data operator()(const T& x) const
    {
        summary_stats_data s;
        s.n = 1;
        s.mean = x;
        s.M2 = 0;
        return s;
    }
};
// summary_stats_binary_op is a functor that accepts two summary_stats_data
// structs and returns a new summary_stats_data which are an
// approximation to the summary_stats for
// all values that have been agregated so far
// Merges two partial summaries into one covering both sample sets
// (pairwise parallel-variance update; see the algorithm reference above the
// struct definitions).
// NOTE: the original derived from thrust::binary_function, which is
// deprecated since C++17 and removed from recent Thrust releases; the base
// class carried no behavior, so the inheritance is dropped.
struct summary_stats_binary_op
{
    __host__ __device__
    summary_stats_data operator()(const summary_stats_data& x,
                                  const summary_stats_data& y) const
    {
        summary_stats_data result;
        // precompute some common subexpressions
        T n = x.n + y.n;
        T delta = y.mean - x.mean;
        T delta2 = delta * delta;
        // combined sample count
        result.n = n;
        // weighted mean update
        result.mean = x.mean + delta * y.n / n;
        // pooled second central moment plus the cross term
        result.M2 = x.M2 + y.M2;
        result.M2 += delta2 * x.n * y.n / n;
        return result;
    }
};
// Functor projecting a summary_stats_data to its mean field
// (used to fill the per-slice mean output array).
struct extract_mean
{
__host__ __device__
T operator()(summary_stats_data& x)
{
return x.mean;
}
};
// Functor projecting a summary_stats_data to its sample standard deviation.
// Guards the n == 1 case (variance() would divide by zero) by returning 0.
// NOTE(review): uses std::sqrt inside a __host__ __device__ body -- relies on
// the CUDA toolchain providing a device overload; confirm for the target
// toolkit version.
struct extract_std
{
__host__ __device__
T operator()(summary_stats_data& x)
{
if (x.n < 1.001) {
return 0.;
} else {
return std::sqrt(x.variance());
}
}
};
/* Computes the mean and standard deviation of u per slice, where
 * particle_slice_id_ptr assigns each of the n_mp particles to a slice.
 * Assumes equal slice ids are stored consecutively (reduce_by_key only
 * merges runs of equal consecutive keys).
 * Outputs: slice_id_ptr (distinct slice ids), slice_mean_ptr,
 * slice_std_ptr, and *n_relevant_entries = number of distinct slices. */
void thrust_stats_per_slice(
int* particle_slice_id_ptr, double* u, const int n_mp,
// int* n_macroparticles_per_slice, const int n_slices, // inputs end
int* slice_id_ptr, double* slice_mean_ptr,
double* slice_std_ptr, int* n_relevant_entries // outputs end
) {
// set up arguments
summary_stats_unary_op unary_op;
summary_stats_binary_op binary_op;
thrust::equal_to<int> binary_pred;
extract_mean unary_extract_mean;
extract_std unary_extract_std;
// input pointers
thrust::device_ptr<double> thrust_u(u);
thrust::device_ptr<int> thrust_p_sid(particle_slice_id_ptr);
// output pointers
thrust::device_ptr<int> thrust_sid(slice_id_ptr);
thrust::device_ptr<double> thrust_mean(slice_mean_ptr);
thrust::device_ptr<double> thrust_std(slice_std_ptr);
// intermediate summary_stats_data type arrays
thrust::device_vector<summary_stats_data> stats_vec(n_mp);
thrust::device_vector<summary_stats_data> stats_vec_out(n_mp);
// convert array to summary_stats_data type
thrust::transform(thrust_u, thrust_u + n_mp, stats_vec.begin(), unary_op);
// pointers to end of relevant reduced entries
typedef thrust::device_ptr<int> Iterator1;
typedef thrust::device_vector<summary_stats_data>::iterator Iterator2;
thrust::pair<Iterator1, Iterator2> new_end;
// compute statistics for each slice
new_end = thrust::reduce_by_key(
thrust_p_sid,
thrust_p_sid + n_mp,
stats_vec.begin(),
thrust_sid,
stats_vec_out.begin(),
binary_pred,
binary_op
);
// how many relevant reduced entries in the output arrays:
*n_relevant_entries = new_end.first - thrust_sid;
// extract results and write to output arrays
thrust::transform(stats_vec_out.begin(), new_end.second,
thrust_mean, unary_extract_mean);
thrust::transform(stats_vec_out.begin(), new_end.second,
thrust_std, unary_extract_std);
}
} // end extern "C"
|
6,234 | #include "includes.h"
/* Square matrix multiply: result[b][t] += sum_i A[b][i] * B[i][t], where
 * b = blockIdx.x (output row) and t = threadIdx.x (output column).
 * `_size` is a macro expected from includes.h -- TODO confirm its value
 * matches blockDim.x / gridDim.x of the launch.
 * NOTE(review): `+=` accumulates into whatever `result` already holds, so
 * the caller must zero-initialize the output buffer -- confirm. */
__global__ void multiply(int *result, int *A, int *B)
{
/* OLD logic
We have a 3 by 3 grid and each block has 3 threads.
So rows = block x id, cols = block y id
So Indices will be C[block X id][block Y id] = A[block X id][threads 0, 1, 2] * B[threads 0, 1, 2][block y id]
*/
//__shared__ int result[_size*_size] ;
/*result[blockIdx.x*blockDim.x +blockIdx.y] += A[blockIdx.x*blockDim.x + threadIdx.x]*B[blockDim.x*threadIdx.x+blockIdx.y];
printf("C[%d] = A[%d]*B[%d] = %d*%d\n",blockIdx.x*blockDim.x +blockIdx.y, blockIdx.x*blockDim.x + threadIdx.x, blockDim.x*threadIdx.x+blockIdx.y,
A[blockIdx.x*blockDim.x + threadIdx.x],B[blockDim.x*threadIdx.x+blockIdx.y]);
Res[blockIdx.x*blockDim.x +blockIdx.y]= result[blockIdx.x*blockDim.x +blockIdx.y];*/
/* NEW logic
I have 3 blocks and 3 threads. Each thread calculates entry for each position compared to the old one having each thread multiplying one value.
So indices will be result[block x id][thread id] = A[block x id][i]* B[i][thread x id]
*/
for(int i=0; i<_size;i++)
{
result[blockIdx.x*blockDim.x +threadIdx.x] += A[blockIdx.x*blockDim.x+i]*B[blockDim.x*i+threadIdx.x];
}
} |
6,235 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Baseline: a single GPU thread performs the whole addition serially.
__global__ void add_OneBlockOneThread(int n, float *x, float *y, float *z)
{
    int i = 0;
    while (i < n) {
        z[i] = x[i] + y[i];
        ++i;
    }
}
// Block-stride loop: each thread of the single block handles elements
// threadIdx.x, threadIdx.x + blockDim.x, threadIdx.x + 2*blockDim.x, ...
__global__ void add_OneBlockManyThreads(int n, float *x, float *y, float *z)
{
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        z[i] = x[i] + y[i];
}
// Grid-stride loop: correct for any grid size relative to n.
__global__ void add_ManyBlocksManyThreads(int n, float *x, float *y, float *z)
{
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step)
        z[i] = x[i] + y[i];
}
// CPU reference implementation of the same element-wise add.
__host__ void add_SingleThreadCPU(int n, float *x, float *y, float *z)
{
    int i = 0;
    while (i < n) {
        z[i] = x[i] + y[i];
        ++i;
    }
}
/* Runs the element-wise add over 2^20 floats in unified memory four ways:
 * CPU single thread, GPU 1x1, GPU 1x512, GPU gridx512.
 * NOTE(review): results are never checked against a reference, no CUDA call
 * is error-checked, and no timing is recorded despite the <chrono> include
 * -- this appears to be demo/benchmark scaffolding. */
int main(void)
{
int N = 1 << 20; // 1048576 elements
float *x, *y, *z;
// Allocate Unified Memory accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
cudaMallocManaged(&z, N * sizeof(float));
// ========== CPU, one thread ========== //
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add_SingleThreadCPU(N, x, y, z);
cudaDeviceSynchronize();
// ========== One block, one thread ========== //
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add_OneBlockOneThread <<< 1, 1 >>> (N, x, y, z);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// ========== One block, many threads ========== //
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add_OneBlockManyThreads <<< 1, 512 >>> (N, x, y, z);
cudaDeviceSynchronize();
// ========== Many Blocks, many threads ========== //
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int blockSize = 512;
// ceil-div so the grid covers all N elements
int numBlocks = (N + blockSize - 1) / blockSize;
add_ManyBlocksManyThreads <<< numBlocks, blockSize >>> (N, x, y, z);
cudaDeviceSynchronize();
cudaFree(x);
cudaFree(y);
cudaFree(z);
return 0;
}
|
6,236 | #include <stdio.h>
// Demo kernel: every launched thread prints a greeting, the passed value,
// and its block/thread coordinates (device printf output is serialized).
__global__ void kernel_example(int value) {
printf("[GPU] Hello from the GPU!\n");
printf("[GPU] The value is %d\n", value);
printf("[GPU] blockDim = %d, blockId = %d, threadIdx = %d\n", blockDim.x, blockIdx.x, threadIdx.x);
}
// Host entry point: reports the device count, then launches the demo kernel
// and waits for its printf output to flush before exiting.
int main(void) {
int nDevices;
printf("[HOST] Hello from the host!\n");
cudaGetDeviceCount(&nDevices);
printf("[HOST] You have %d CUDA-capable GPU(s)\n", nDevices);
// 4 blocks, each block with 4 threads
kernel_example<<<4,4>>>(25);
cudaDeviceSynchronize();
return 0;
} |
6,237 | #pragma once
#include <iostream>
namespace RayTracing
{
    // 3D vector backed by a float4; the w component defaults to 0 and is
    // carried as padding / optional 4th component.
    class Vector3
    {
    public:
        float4 d;
    public:
        __host__ __device__
        Vector3() : d({ 0, 0, 0, 0}) {}
        __host__ __device__
        Vector3(float x, float y, float z, float w=0) : d({ x, y, z, w }) {}
        __host__ __device__
        Vector3(const float4 &v) : d(v) {}
        __host__ __device__
        Vector3 operator-() const;
        __host__ __device__
        Vector3& operator+=(const Vector3 &v);
        // BUG FIX: was "__host__ __host__ __device__" (duplicated qualifier).
        __host__ __device__
        Vector3 operator+(const Vector3 &v) const;
        __host__ __device__
        Vector3& operator-=(const Vector3 &v);
        __host__ __device__
        Vector3 operator-(const Vector3 &v) const;
        __host__ __device__
        Vector3& operator*=(const Vector3 &v);
        __host__ __device__
        Vector3 operator*(const Vector3 &v) const;
        /* __host__ __device__ */
        /* Vector3 operator*(const float4 &v) const; */
        __host__ __device__
        Vector3& operator*=(const float t);
        __host__ __device__
        Vector3 operator*(const float t) const;
        // Component-wise division by a vector is intentionally disallowed.
        __host__ __device__
        Vector3& operator/=(const Vector3 &v) = delete;
        __host__ __device__
        Vector3 operator/(const Vector3 &v) const = delete;
        __host__ __device__
        Vector3& operator/=(const float t);
        __host__ __device__
        Vector3 operator/(const float t) const;
        __host__ __device__
        const float& operator[](const int i) const;
        __host__ __device__
        float Length() const;
        __host__ __device__
        float LengthSquared() const;
        __host__ __device__
        float Dot(const Vector3 &v) const;
        __host__ __device__
        Vector3 Cross(const Vector3 &v) const;
        __host__ __device__
        float Dist(const Vector3 &v) const;
        __host__ __device__
        Vector3 UnitVector() const;
        __host__ __device__
        bool NearZero() const;
        __device__
        void AtomicExch(const Vector3& v);
        __host__ __device__
        Vector3& Clamp(const float tMin, const float tMax);
        __host__ __device__
        static Vector3 Reflect(const Vector3 &v, const Vector3 &normal);
    };
    // Scalar * vector (mirror of Vector3::operator*(float)).
    __host__ __device__
    Vector3 operator*(float t, const Vector3 &v);
    using Point3 = Vector3;
    // RGB color
    using Color = Vector3;
    // Converts a Color to 8-bit RGBA components.
    __host__ __device__
    void ColorToRGBA(
        const Color &color,
        unsigned char &r,
        unsigned char &g,
        unsigned char &b,
        unsigned char &a
    );
    std::ostream& operator<<(std::ostream &stream, const Color &v);
    __host__
    std::istream& operator>>(std::istream &istream, Vector3 &v);
} // namespace RayTracing
|
6,238 | #include "includes.h"
// Copies the .y component of each int2 in d_input[startPos..rLen) into
// d_output at the same position. The 2D grid / 2D block layout is flattened
// exactly as in the original: the per-block stride is blockDim.x even though
// the thread id spans blockDim.x * blockDim.y.
__global__ void getIntYArray_kernel(int2* d_input, int startPos, int rLen, int* d_output)
{
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;
    const int bid = blockIdx.x + blockIdx.y * gridDim.x;
    const int pos = startPos + bid * blockDim.x + tid;
    if (pos < rLen)
    {
        d_output[pos] = d_input[pos].y;
    }
}
6,239 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (header says "Do not modify").
 * Single logical thread: repeatedly mangles `comp` through a fixed
 * expression tree driven by the runtime arguments and prints the final
 * value for cross-compiler output comparison. Left unmodified. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
if (comp < var_3 + +1.5921E34f) {
if (comp >= -1.2111E34f - var_4 / var_5 / coshf((var_6 - -1.9447E-30f))) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = -1.2044E-44f;
comp = tmp_1 - +1.5647E-37f * -1.2308E-27f;
comp = (var_7 / -1.4092E-41f - tanhf(var_8 + (var_9 - (-1.0533E-6f + var_10 - -1.0586E-23f))));
for (int i=0; i < var_2; ++i) {
float tmp_2 = -1.3208E-37f;
float tmp_3 = (var_11 * +1.6128E-37f * (+1.9686E34f + -1.1417E-36f - var_12));
comp += tmp_3 + tmp_2 * (+1.9661E-36f - var_13 - var_14);
}
if (comp < -1.8627E-36f - (+1.7715E-37f + +1.4506E-25f)) {
comp += (+1.6074E0f + floorf(coshf((+0.0f / (-0.0f - atan2f(+1.7322E19f, +1.4562E-37f / var_15 * -1.7117E34f - +1.8625E-37f - var_16 + -1.1597E17f))))));
float tmp_4 = +1.7180E-41f;
comp = tmp_4 + (+1.8385E9f + log10f((var_17 + acosf(atan2f(logf(+1.9291E17f), -1.2215E35f / var_18 * (-1.4906E12f / -1.8605E-44f))))));
comp += var_19 * var_20 * (var_21 / -1.2871E-42f);
}
}
}
}
printf("%.17g\n", comp);
}
/* Allocates a heap array of `count` floats all set to v; the caller owns the
 * memory and must free() it.
 * Generalized: `count` was previously hard-coded to 10; the default argument
 * keeps every existing call site working unchanged. */
float* initPointer(float v, int count = 10) {
    float *ret = (float*) malloc(sizeof(float) * count);
    for (int i = 0; i < count; ++i)
        ret[i] = v;
    return ret;
}
/* Auto-generated driver: parses 22 positional arguments and forwards them to
 * the compute kernel launched as <<<1,1>>>.
 * NOTE(review): argc is never validated -- fewer than 22 arguments makes the
 * atof/atoi calls dereference past argv (undefined behavior). Generated code
 * is left unmodified. */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
cudaDeviceSynchronize();
return 0;
}
|
6,240 | #include "includes.h"
/* Accumulates a grey-level co-occurrence matrix (135-degree direction) into
 * `glcm` via atomicAdd; `max` is the number of grey levels, so bin index
 * k = max*A[idx] + A[neighbour].
 * NOTE(review): the neighbour is A[idx + (nx-1)], i.e. the next row one
 * column to the left, and the `blockIdx.x==i && idx > i*nx` gate ties work
 * to the block index; edge columns and the last row do not appear to be
 * excluded -- confirm the intended boundary handling.
 * The trailing __syncthreads() has no observable effect (no work follows). */
__global__ void glcm_calculation_135(int *A,int *glcm, const int nx, const int ny,int max){
int ix = threadIdx.x + blockIdx.x* blockDim.x;
int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =iy*nx+ix;
int i;
int k=0;
for(i=0;i<nx-1;i++){
if(blockIdx.x==i && idx >i*nx){
k=max*A[idx]+A[idx+(nx-1)];
atomicAdd(&glcm[k],1);
}
}
__syncthreads();
} |
6,241 | // System includes
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
//#include <helper_functions.h>
#define rowOffset(X) ((((X) - 1) * ((X) - 1)) / 4)
/* Builds rows 2..n of the left half of Pascal's triangle into `table`
 * (row r occupies table[rowOffset(r) .. rowOffset(r+1)-1]), staging values
 * through a 256-entry shared ring buffer `cache`.
 * Launch contract: a single block with at least n+1 threads -- each thread
 * owns one column. The __syncthreads() is safe: the row loop bounds do not
 * depend on threadIdx, so every thread reaches the barrier each iteration.
 * NOTE(review): correctness relies on the indices of the current and
 * previous row not colliding modulo 256 in the ring buffer -- confirm this
 * holds for the largest supported n. */
__global__ void binom(unsigned long *table, const int n)
{
__shared__ unsigned long cache[0x100];
int col = blockDim.x * blockIdx.x + threadIdx.x;
for (int row = 2; row <= n; row++) {
int i = rowOffset(row) + col;
__syncthreads();
if (col == 0) {
// first stored entry of row `row` is the value row (i.e. C(row,1))
table[i] = cache[i & 0xFF] = row;
} else if (col < row / 2) {
int j = rowOffset(row - 1) + col - 1;
cache[i & 0xFF] = cache[j & 0xFF];
// middle element of an even row has only one parent
if (!(row % 2 == 0 && col == row / 2 - 1))
j++;
cache[i & 0xFF] += cache[j & 0xFF];
table[i] = cache[i & 0xFF];
}
}
}
/*
* Maximum number of rows (specified in program args).
*/
static unsigned int length;
/*
* Parse command line arguments
*/
/* Parses the single command-line argument as the Pascal-triangle row number.
 * Exits with usage on wrong arity; warns (but proceeds) when the row is
 * large enough that unsigned long entries will overflow. */
unsigned int parse(int argc, char ** argv)
{
    unsigned int i;
    if (argc != 2)
    {
        printf("Usage: %s n\n", argv[0]);
        exit(1);
    }
    i = atoi(argv[1]);
    if (i > 67) {
        /* BUG FIX: the original used the invalid conversion "%U" and passed
         * no argument for it (undefined behavior). */
        fprintf(stderr, "Warning: %u is too big, results will be affected by integer overflow.\n", i);
    }
    return i;
}
/*
* Main program accepts one parameter: the number of the row
* of Pascal's triangle to print.
*/
/* Computes and prints row `length` of Pascal's triangle: only the left half
 * of each row is stored by the kernel (symmetry), so the stored half is
 * printed forward, then mirrored, with the implicit leading/trailing 1s. */
int main (int argc, char ** argv)
{
    length = parse(argc, argv);
    cudaError_t err = cudaSuccess;
    size_t size = rowOffset(length + 1) * sizeof(unsigned long);
    unsigned long *table = (unsigned long *)malloc(size);
    unsigned long *d_table = NULL;
    err = cudaMalloc((void **)&d_table, size);
    if (table == NULL) {
        fprintf(stderr, "Failed to allocate host table!\n");
        exit(EXIT_FAILURE);
    }
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device table: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Single block; one thread per column of the widest row.
    binom<<<1, length + 1>>>(d_table, length);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch binom kernel: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(table, d_table, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy table from device to host: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_table);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device table: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* BUG FIXES vs. the original printing code:
     *  - "%lU" is not a valid conversion (undefined behavior); "%lu" is the
     *    correct unsigned long conversion.
     *  - one printf passed an unsigned long to a "%s" conversion (UB).
     *  - the first value printed was *table (start of row 2), not the first
     *    stored entry of the requested row.
     *  - the descending loop used an unsigned index, which underflows (and
     *    loops forever) when rowOffset(length) == 0, i.e. length <= 2. */
    int lo = (int) rowOffset(length);
    int hi = (int) rowOffset(length + 1);
    int i = lo;
    printf("1 %lu", table[i]);
    for (++i; i < hi; i++) {
        printf(" %lu", table[i]);
    }
    if (length % 2 == 0) i--;   // even rows: do not repeat the middle element
    for (--i; i >= lo; i--) {
        printf(" %lu", table[i]);
    }
    printf(" 1\n");
    free(table);
    return 0;
}
|
6,242 | #include "includes.h"
/* Deletes edges from an adjacency structure. decrementalArray lists `size`
 * endpoints as consecutive pairs (u, v); thread i removes the partner of its
 * endpoint from its own neighbour list by overwriting the first matching
 * entry with 0 (both directions are handled, one by each thread of a pair).
 * NOTE(review): vertex ids appear to be 1-based -- the neighbour range of
 * vertex `me` is neighbourArray[vertexArray[me-1] .. vertexArray[me]) --
 * and 0 appears to be the "no edge" sentinel; confirm against the caller.
 * Parameters n (vertex count) and m (edge count) are unused here. */
__global__ void decrementalColouringNew (int *vertexArray, int *neighbourArray, int n, int m, int *decrementalArray, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size){
return;
}
int startStart, startStop;
int me, you;
// int otheri;
// bool ipercent2 = false;
me = decrementalArray[i];
if (i%2 == 0){
you = decrementalArray[i+1];
// otheri = i+1;
// ipercent2 = true;
}
else{
you = decrementalArray[i-1];
// otheri = i-1;
}
//printf("I am %d and I am deleting %d - %d\n", i, me, you);
startStart = vertexArray[me-1];
startStop = vertexArray[me];
for (int j=startStart; j<startStop; j++){
if (neighbourArray[j]==you){
neighbourArray[j]=0;
break;
}
}
} |
6,243 | #include "includes.h"
#define MINVAL 1e-7
// One elimination step of Gaussian elimination on a Size x Size matrix.
// Thread `t` (t > i) updates the entries Mtr[z*Size + t] for z = i..Size-1,
// zeroing the entry paired with pivot Mtr[i*Size + i].
// NOTE(review): whether Mtr[a*Size + b] is element (a,b) or (b,a) depends on
// the caller's layout -- confirm the storage orientation.
__global__ void Gaus(double* Mtr, int Size, int i)
{
    const int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t <= i || t >= Size)
        return;
    // Multiplier that cancels the target entry against the pivot.
    const double factor = -Mtr[i * Size + t] / Mtr[i * Size + i];
    for (int z = i; z < Size; z++)
        Mtr[z * Size + t] += Mtr[z * Size + i] * factor;
}
6,244 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <assert.h>
#include <vector>
#include <utility>
#include <iostream>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Debug-build guard for CUDA runtime calls: prints the error and asserts on
// failure, then passes the status through unchanged. Compiles to a plain
// pass-through in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (cudaSuccess != result) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  return result;
}
/* Times one host-to-device and one device-to-host copy of n floats using
 * CUDA events, verifies the round trip (h_a -> d -> h_b), and returns the
 * pair (h2d_ms, d2h_ms). h_a/h_b may be pageable or pinned memory; d must
 * hold at least n floats. */
std::pair<float,float> profileCopies(float* h_a,
float* h_b,
float* d,
unsigned int n)
{
unsigned int bytes = n*sizeof(float);
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda(cudaEventCreate(&startEvent));
checkCuda(cudaEventCreate(&stopEvent));
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
//times are in miliseconds
float timeh2d, timed2h;
checkCuda(cudaEventElapsedTime(&timeh2d, startEvent, stopEvent));
checkCuda(cudaEventRecord(startEvent, 0));
checkCuda(cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost));
checkCuda(cudaEventRecord(stopEvent, 0));
checkCuda(cudaEventSynchronize(stopEvent));
checkCuda(cudaEventElapsedTime(&timed2h, startEvent, stopEvent));
// round-trip verification: h_b must equal h_a element-for-element
for (int i = 0; i<n; ++i) {
if (h_a[i]!=h_b[i]) {
printf("*** transfers failed ***");
break;
}
}
// clean up events
checkCuda(cudaEventDestroy(startEvent));
checkCuda(cudaEventDestroy(stopEvent));
return std::make_pair(timeh2d,timed2h);
}
/* Bandwidth sweep: for transfer sizes from 1 KB to 1 GB (doubling), times
 * H2D and D2H copies with pageable vs. pinned host memory and prints one
 * CSV-like line per size: bytes; H2D pageable; H2D pinned; D2H pageable;
 * D2H pinned (times in milliseconds, from profileCopies). */
int main()
{
size_t kb = 1024;
size_t GB = kb*kb*kb;
std::vector<size_t> sizes;
for (size_t current = kb; current<=GB; current *= 2)
sizes.push_back(current/sizeof(float));
std::cout << "#Size; H2DPage; H2DPin; D2HPage; D2HPin" << std::endl;
for (unsigned int nElements: sizes) {
const unsigned int bytes = nElements*sizeof(float);
// host arrays
float* h_aPageable, * h_bPageable;
float* h_aPinned, * h_bPinned;
// device array
float* d_a;
// allocate and initialize
h_aPageable = (float*) malloc(bytes); // host pageable
h_bPageable = (float*) malloc(bytes); // host pageable
checkCuda(cudaMallocHost((void**) &h_aPinned, bytes)); // host pinned
checkCuda(cudaMallocHost((void**) &h_bPinned, bytes)); // host pinned
checkCuda(cudaMalloc((void**) &d_a, bytes)); // device
for (int i = 0; i<nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes);
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// output device info and transfer size
cudaDeviceProp prop;
checkCuda(cudaGetDeviceProperties(&prop, 0));
// perform copies and report bandwidth
auto tPage = profileCopies(h_aPageable, h_bPageable, d_a, nElements);
auto tPinned = profileCopies(h_aPinned, h_bPinned, d_a, nElements);
std::cout << bytes << ";"
<< tPage.first << ";"
<< tPinned.first << ";"
<< tPage.second << ";"
<< tPinned.second << std::endl;
// cleanup
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
}
return 0;
}
|
6,245 | #include "includes.h"
// Writes each element's linear block id into a dimx x dimy int grid
// (row-major), useful for visualizing the block layout.
__global__ void kernel2( int *a, int dimx, int dimy )
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < dimx && y < dimy)
        a[y * dimx + x] = blockIdx.y * gridDim.x + blockIdx.x;
}
6,246 | #include "includes.h"
// Row vector (1 x n) times matrix (n x m): d_out[j] = sum_i d1_in[i] * d2_in[i*m + j].
// One thread per output column.
__global__ void multVector(int *d1_in, int *d2_in, int *d_out, int n, int m){
    int ind = blockDim.x*blockIdx.x + threadIdx.x;
    if (ind < m) {
        // IMPROVEMENT: accumulate in a register instead of read-modify-
        // writing global memory on every loop iteration; single store at end.
        int acc = 0;
        for (int i = 0; i < n; i++) {
            acc += d1_in[i] * d2_in[i*m + ind];
        }
        d_out[ind] = acc;
    }
}
//xfail:REPAIR_ERROR
//--blockDim=8 --gridDim=1 --no-inline
// The statically given values for A are not preserved when we translate CUDA
// since the host is free to change the contents of A.
// cf. testsuite/OpenCL/globalarray/pass2
// NOTE(review): this is a verifier test case (GPUVerify-style directives
// above) — its exact structure is intentional; do not "simplify" it.
__constant__ int A[8] = {0,1,2,3,4,5,6,7};
__global__ void globalarray(float* p) {
  int i = threadIdx.x;   // one element of A per thread (blockDim is 8)
  int a = A[i];
  // If the initializer were honored, a == threadIdx.x and the store below is
  // dead; the verifier must still consider it because the host may rewrite A,
  // making the write to p[0] a potential all-thread race.
  if(a != threadIdx.x) {
    p[0] = threadIdx.x;
  }
}
|
6,248 | extern "C" {
// Generated ABI structs (AnyDSL/Impala compiler output — see the #line
// directives below): field names are positional; layouts must match the
// host-side generator exactly, so do not reorder or rename fields.

// Raw device buffer: e1 is the data pointer (reinterpreted as double* at
// use sites via a union cast); e0 presumably a size/device id — TODO confirm.
typedef struct {
int e0;
char* e1;
} struct_Buffer_6327;
// Image descriptor: two pixel buffers e0/e1 plus integer extents e2/e3
// (used as width/height bounds for gid_x/gid_y in the kernels below).
typedef struct {
struct_Buffer_6327 e0;
struct_Buffer_6327 e1;
int e2;
int e3;
} struct_image_6326;
// Convolution filter: coefficient buffer e0 with integer extents e1 x e2.
typedef struct {
struct_Buffer_6327 e0;
int e1;
int e2;
} struct_filter_6332;
// Thin device wrappers exposing the CUDA built-in index/dimension registers
// as ordinary functions, apparently so the generated code below can reference
// them through plain calls rather than the builtin variables directly.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }
__global__ void lambda_25292(struct_image_6326, struct_Buffer_6327, double*, struct_filter_6332);
__global__ void lambda_25546(struct_image_6326, struct_filter_6332, double*, double*, struct_Buffer_6327);
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_25292(struct_image_6326 _25295_30003, struct_Buffer_6327 _25296_30004, double* _25297_30005, struct_filter_6332 _25298_30006) {
__shared__ double ds_img[134][7];
int bdimx_30012;
int pbdimx_30012;
int bdimy_30018;
int pbdimy_30018;
int bidx_30024;
int pbidx_30024;
int bidy_30030;
int pbidy_30030;
int tidx_30036;
int ptidx_30036;
int tidy_30042;
int ptidy_30042;
double* reserve_shared_30050;
double* preserve_shared_30050;
int _30061;
int p_30061;
int _30117;
int p_30117;
int _30183;
int p_30183;
double sum_30185;
double psum_30185;
int _30122;
int p_30122;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimx_30012 = blockDim_x();
pbdimx_30012 = bdimx_30012;
l30010: ;
bdimx_30012 = pbdimx_30012;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimy_30018 = blockDim_y();
pbdimy_30018 = bdimy_30018;
l30016: ;
bdimy_30018 = pbdimy_30018;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidx_30024 = blockIdx_x();
pbidx_30024 = bidx_30024;
l30022: ;
bidx_30024 = pbidx_30024;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidy_30030 = blockIdx_y();
pbidy_30030 = bidy_30030;
l30028: ;
bidy_30030 = pbidy_30030;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidx_30036 = threadIdx_x();
ptidx_30036 = tidx_30036;
l30034: ;
tidx_30036 = ptidx_30036;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidy_30042 = threadIdx_y();
ptidy_30042 = tidy_30042;
l30040: ;
tidy_30042 = ptidy_30042;
#line 201 "gpu_device_shm.impala"
__shared__ double reserver_reserve_shared_30050[938];
preserve_shared_30050 = reserver_reserve_shared_30050;
l30048: ;
reserve_shared_30050 = preserve_shared_30050;
#line 193 "gpu_device_shm.impala"
int _30054;
_30054 = _25298_30006.e2;
#line 189 "gpu_device_shm.impala"
int _30084;
_30084 = bidy_30030 * bdimy_30018;
#line 11 "main.impala"
int _30081;
_30081 = _25295_30003.e2;
#line 187 "gpu_device_shm.impala"
int _30075;
_30075 = bidx_30024 * bdimx_30012;
#line 11 "main.impala"
int _30090;
_30090 = _25295_30003.e3;
#line 4 "gaussian.impala"
int _30064;
_30064 = _25298_30006.e1;
#line 193 "gpu_device_shm.impala"
int extend_height_30055;
extend_height_30055 = _30054 / 2;
#line 189 "gpu_device_shm.impala"
int gid_y_30085;
gid_y_30085 = _30084 + tidy_30042;
#line 187 "gpu_device_shm.impala"
int gid_x_30076;
gid_x_30076 = _30075 + tidx_30036;
#line 4 "gaussian.impala"
int h_anchor_30065;
h_anchor_30065 = _30064 / 2;
#line 197 "gpu_device_shm.impala"
int _30056;
_30056 = 2 * extend_height_30055;
#line 195 "gpu_device_shm.impala"
int _30066;
_30066 = 2 * h_anchor_30065;
#line 197 "gpu_device_shm.impala"
int shm_dimy_30057;
shm_dimy_30057 = bdimy_30018 + _30056;
#line 195 "gpu_device_shm.impala"
int shm_dimx_30067;
shm_dimx_30067 = bdimx_30012 + _30066;
#line 38 "gpu_device_shm.impala"
bool _30058;
_30058 = 0 < shm_dimy_30057;
#line 38 "gpu_device_shm.impala"
if (_30058) goto l30059; else goto l30234;
l30234: ;
#line 231 "gpu_device_shm.impala"
goto l30166;
l30059: ;
#line 226 "gpu_device_shm.impala"
int _30102;
_30102 = tidy_30042 * shm_dimx_30067;
#line 221 "gpu_device_shm.impala"
int _30086;
_30086 = gid_y_30085 - extend_height_30055;
#line 219 "gpu_device_shm.impala"
int _30077;
_30077 = gid_x_30076 - h_anchor_30065;
#line 217 "gpu_device_shm.impala"
bool _30073;
_30073 = tidy_30042 < shm_dimy_30057;
#line 224 "gpu_device_shm.impala"
bool _30087;
_30087 = 0 <= _30086;
#line 224 "gpu_device_shm.impala"
bool _30091;
_30091 = _30086 < _30090;
#line 227 "gpu_device_shm.impala"
int _30096;
_30096 = _30086 * _30081;
#line 38 "gpu_device_shm.impala"
p_30061 = 0;
goto l30060;
l30060: ;
_30061 = p_30061;
#line 38 "gpu_device_shm.impala"
bool _30068;
_30068 = _30061 < shm_dimx_30067;
#line 38 "gpu_device_shm.impala"
if (_30068) goto l30069; else goto l30115;
l30115: ;
#line 38 "gpu_device_shm.impala"
p_30117 = bdimy_30018;
goto l30116;
l30116: ;
_30117 = p_30117;
#line 38 "gpu_device_shm.impala"
bool _30119;
_30119 = _30117 < shm_dimy_30057;
#line 38 "gpu_device_shm.impala"
if (_30119) goto l30120; else goto l30165;
l30165: ;
#line 231 "gpu_device_shm.impala"
goto l30166;
l30166: ;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
__syncthreads();
l30171: ;
#line 237 "gpu_device_shm.impala"
bool _30173;
_30173 = gid_x_30076 < _30081;
#line 237 "gpu_device_shm.impala"
if (_30173) goto l30174; else goto l30233;
l30233: ;
#line 240 "gpu_device_shm.impala"
goto l30232;
l30174: ;
#line 237 "gpu_device_shm.impala"
bool _30175;
_30175 = gid_y_30085 < _30090;
#line 237 "gpu_device_shm.impala"
if (_30175) goto l30176; else goto l30231;
l30231: ;
#line 240 "gpu_device_shm.impala"
goto l30232;
l30232: ;
return ;
l30176: ;
#line 64 "gpu_device_shm.impala"
char* _30213;
_30213 = _25296_30004.e1;
#line 204 "gpu_device_shm.impala"
int _30198;
_30198 = extend_height_30055 - _30084;
#line 64 "gpu_device_shm.impala"
double* _30214;
union { double* dst; char* src; } u_30214;
u_30214.src = _30213;
_30214 = u_30214.dst;
#line 203 "gpu_device_shm.impala"
int _30203;
_30203 = h_anchor_30065 - _30075;
#line 64 "gpu_device_shm.impala"
int _30215;
_30215 = gid_y_30085 * _30081;
#line 17 "gaussian.impala"
bool _30177;
_30177 = h_anchor_30065 <= gid_x_30076;
#line 64 "gpu_device_shm.impala"
int _30216;
_30216 = _30215 + gid_x_30076;
#line 72 "gpu_device_shm.impala"
int _30199;
_30199 = gid_y_30085 + _30198;
#line 64 "gpu_device_shm.impala"
double* _30217;
_30217 = _30214 + _30216;
#line 72 "gpu_device_shm.impala"
int _30200;
_30200 = _30199 * shm_dimx_30067;
#line 17 "gaussian.impala"
if (_30177) goto l30178; else goto l30230;
l30230: ;
#line 27 "gaussian.impala"
goto l30222;
l30178: ;
#line 17 "gaussian.impala"
int _30179;
_30179 = _30081 - h_anchor_30065;
#line 17 "gaussian.impala"
bool _30180;
_30180 = gid_x_30076 < _30179;
#line 17 "gaussian.impala"
if (_30180) goto l30181; else goto l30221;
l30221: ;
#line 27 "gaussian.impala"
goto l30222;
l30222: ;
#line 72 "gpu_device_shm.impala"
int _30223;
_30223 = _30200 + gid_x_30076;
#line 72 "gpu_device_shm.impala"
int _30224;
_30224 = _30223 + _30203;
#line 72 "gpu_device_shm.impala"
double* _30225;
_30225 = reserve_shared_30050 + _30224;
#line 72 "gpu_device_shm.impala"
double _30226;
_30226 = *_30225;
#line 72 "gpu_device_shm.impala"
double _30228;
_30228 = _30226;
#line 64 "gpu_device_shm.impala"
*_30217 = _30228;
return ;
l30181: ;
#line 77 "gpu_device_shm.impala"
struct_Buffer_6327 _30191;
_30191 = _25298_30006.e0;
#line 19 "gaussian.impala"
int _30219;
_30219 = 0 - h_anchor_30065;
#line 77 "gpu_device_shm.impala"
char* _30192;
_30192 = _30191.e1;
#line 19 "gaussian.impala"
int _30187;
_30187 = 1 + h_anchor_30065;
#line 77 "gpu_device_shm.impala"
double* _30193;
union { double* dst; char* src; } u_30193;
u_30193.src = _30192;
_30193 = u_30193.dst;
#line 27 "gpu_device_shm.impala"
p_30183 = _30219;
psum_30185 = 0.000000e+00;
goto l30182;
l30182: ;
_30183 = p_30183;
sum_30185 = psum_30185;
#line 27 "gpu_device_shm.impala"
bool _30188;
_30188 = _30183 < _30187;
#line 27 "gpu_device_shm.impala"
if (_30188) goto l30189; else goto l30212;
l30212: ;
#line 64 "gpu_device_shm.impala"
*_30217 = sum_30185;
return ;
l30189: ;
#line 31 "gpu_device_shm.impala"
int _30190;
_30190 = 1 + _30183;
#line 21 "gaussian.impala"
int _30201;
_30201 = gid_x_30076 + _30183;
#line 72 "gpu_device_shm.impala"
int _30202;
_30202 = _30200 + _30201;
#line 21 "gaussian.impala"
int _30194;
_30194 = _30183 + h_anchor_30065;
#line 72 "gpu_device_shm.impala"
int _30204;
_30204 = _30202 + _30203;
#line 76 "gpu_device_shm.impala"
double* i_30195;
i_30195 = _30193 + _30194;
#line 72 "gpu_device_shm.impala"
double* _30205;
_30205 = reserve_shared_30050 + _30204;
#line 77 "gpu_device_shm.impala"
double _30196;
_30196 = *i_30195;
#line 77 "gpu_device_shm.impala"
double _30208;
_30208 = _30196;
#line 72 "gpu_device_shm.impala"
double _30206;
_30206 = *_30205;
#line 72 "gpu_device_shm.impala"
double _30209;
_30209 = _30206;
#line 21 "gaussian.impala"
double _30210;
_30210 = _30208 * _30209;
#line 21 "gaussian.impala"
double _30211;
_30211 = sum_30185 + _30210;
#line 27 "gpu_device_shm.impala"
p_30183 = _30190;
psum_30185 = _30211;
goto l30182;
l30120: ;
#line 221 "gpu_device_shm.impala"
int img_index_y_30137;
img_index_y_30137 = _30086 + _30117;
#line 214 "gpu_device_shm.impala"
int shm_index_y_30129;
shm_index_y_30129 = tidy_30042 + _30117;
#line 224 "gpu_device_shm.impala"
bool _30138;
_30138 = 0 <= img_index_y_30137;
#line 227 "gpu_device_shm.impala"
int _30145;
_30145 = img_index_y_30137 * _30081;
#line 224 "gpu_device_shm.impala"
bool _30140;
_30140 = img_index_y_30137 < _30090;
#line 217 "gpu_device_shm.impala"
bool _30130;
_30130 = shm_index_y_30129 < shm_dimy_30057;
#line 226 "gpu_device_shm.impala"
int _30150;
_30150 = shm_index_y_30129 * shm_dimx_30067;
#line 38 "gpu_device_shm.impala"
p_30122 = 0;
goto l30121;
l30121: ;
_30122 = p_30122;
#line 38 "gpu_device_shm.impala"
bool _30124;
_30124 = _30122 < shm_dimx_30067;
#line 38 "gpu_device_shm.impala"
if (_30124) goto l30125; else goto l30163;
l30163: ;
#line 42 "gpu_device_shm.impala"
int _30164;
_30164 = _30117 + bdimy_30018;
#line 38 "gpu_device_shm.impala"
p_30117 = _30164;
goto l30116;
l30125: ;
#line 212 "gpu_device_shm.impala"
int shm_index_x_30126;
shm_index_x_30126 = tidx_30036 + _30122;
#line 217 "gpu_device_shm.impala"
bool _30127;
_30127 = shm_index_x_30126 < shm_dimx_30067;
#line 217 "gpu_device_shm.impala"
if (_30127) goto l30128; else goto l30162;
l30162: ;
#line 229 "gpu_device_shm.impala"
goto l30161;
l30128: ;
#line 217 "gpu_device_shm.impala"
if (_30130) goto l30131; else goto l30160;
l30160: ;
#line 229 "gpu_device_shm.impala"
goto l30161;
l30161: ;
#line 40 "gpu_device_shm.impala"
goto l30142;
l30131: ;
#line 219 "gpu_device_shm.impala"
int img_index_x_30132;
img_index_x_30132 = _30077 + _30122;
#line 224 "gpu_device_shm.impala"
bool _30133;
_30133 = 0 <= img_index_x_30132;
#line 224 "gpu_device_shm.impala"
if (_30133) goto l30134; else goto l30159;
l30159: ;
#line 228 "gpu_device_shm.impala"
goto l30156;
l30134: ;
#line 224 "gpu_device_shm.impala"
bool _30135;
_30135 = img_index_x_30132 < _30081;
#line 224 "gpu_device_shm.impala"
if (_30135) goto l30136; else goto l30158;
l30158: ;
#line 228 "gpu_device_shm.impala"
goto l30156;
l30136: ;
#line 224 "gpu_device_shm.impala"
if (_30138) goto l30139; else goto l30157;
l30157: ;
#line 228 "gpu_device_shm.impala"
goto l30156;
l30139: ;
#line 224 "gpu_device_shm.impala"
if (_30140) goto l30141; else goto l30155;
l30155: ;
#line 228 "gpu_device_shm.impala"
goto l30156;
l30156: ;
#line 40 "gpu_device_shm.impala"
goto l30142;
l30141: ;
#line 226 "gpu_device_shm.impala"
int _30151;
_30151 = _30150 + shm_index_x_30126;
#line 226 "gpu_device_shm.impala"
double* _30152;
_30152 = reserve_shared_30050 + _30151;
#line 227 "gpu_device_shm.impala"
int _30146;
_30146 = _30145 + img_index_x_30132;
#line 227 "gpu_device_shm.impala"
double* _30147;
_30147 = _25297_30005 + _30146;
#line 227 "gpu_device_shm.impala"
double _30148;
_30148 = *_30147;
#line 227 "gpu_device_shm.impala"
double _30153;
_30153 = _30148;
#line 226 "gpu_device_shm.impala"
*_30152 = _30153;
#line 40 "gpu_device_shm.impala"
goto l30142;
l30142: ;
#line 42 "gpu_device_shm.impala"
int _30144;
_30144 = _30122 + bdimx_30012;
#line 38 "gpu_device_shm.impala"
p_30122 = _30144;
goto l30121;
l30069: ;
#line 212 "gpu_device_shm.impala"
int shm_index_x_30070;
shm_index_x_30070 = tidx_30036 + _30061;
#line 217 "gpu_device_shm.impala"
bool _30071;
_30071 = shm_index_x_30070 < shm_dimx_30067;
#line 217 "gpu_device_shm.impala"
if (_30071) goto l30072; else goto l30114;
l30114: ;
#line 229 "gpu_device_shm.impala"
goto l30113;
l30072: ;
#line 217 "gpu_device_shm.impala"
if (_30073) goto l30074; else goto l30112;
l30112: ;
#line 229 "gpu_device_shm.impala"
goto l30113;
l30113: ;
#line 40 "gpu_device_shm.impala"
goto l30093;
l30074: ;
#line 219 "gpu_device_shm.impala"
int img_index_x_30078;
img_index_x_30078 = _30077 + _30061;
#line 224 "gpu_device_shm.impala"
bool _30079;
_30079 = 0 <= img_index_x_30078;
#line 224 "gpu_device_shm.impala"
if (_30079) goto l30080; else goto l30111;
l30111: ;
#line 228 "gpu_device_shm.impala"
goto l30108;
l30080: ;
#line 224 "gpu_device_shm.impala"
bool _30082;
_30082 = img_index_x_30078 < _30081;
#line 224 "gpu_device_shm.impala"
if (_30082) goto l30083; else goto l30110;
l30110: ;
#line 228 "gpu_device_shm.impala"
goto l30108;
l30083: ;
#line 224 "gpu_device_shm.impala"
if (_30087) goto l30088; else goto l30109;
l30109: ;
#line 228 "gpu_device_shm.impala"
goto l30108;
l30088: ;
#line 224 "gpu_device_shm.impala"
if (_30091) goto l30092; else goto l30107;
l30107: ;
#line 228 "gpu_device_shm.impala"
goto l30108;
l30108: ;
#line 40 "gpu_device_shm.impala"
goto l30093;
l30092: ;
#line 226 "gpu_device_shm.impala"
int _30103;
_30103 = _30102 + shm_index_x_30070;
#line 226 "gpu_device_shm.impala"
double* _30104;
_30104 = reserve_shared_30050 + _30103;
#line 227 "gpu_device_shm.impala"
int _30097;
_30097 = _30096 + img_index_x_30078;
#line 227 "gpu_device_shm.impala"
double* _30098;
_30098 = _25297_30005 + _30097;
#line 227 "gpu_device_shm.impala"
double _30099;
_30099 = *_30098;
#line 227 "gpu_device_shm.impala"
double _30105;
_30105 = _30099;
#line 226 "gpu_device_shm.impala"
*_30104 = _30105;
#line 40 "gpu_device_shm.impala"
goto l30093;
l30093: ;
#line 42 "gpu_device_shm.impala"
int _30095;
_30095 = _30061 + bdimx_30012;
#line 38 "gpu_device_shm.impala"
p_30061 = _30095;
goto l30060;
}
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_25546(struct_image_6326 _25549_30238, struct_filter_6332 _25550_30239, double* _25551_30240, double* _25552_30241, struct_Buffer_6327 _25553_30242) {
__shared__ double ds_img[134][7];
int bdimx_30245;
int pbdimx_30245;
int bdimy_30248;
int pbdimy_30248;
int bidx_30251;
int pbidx_30251;
int bidy_30254;
int pbidy_30254;
int tidx_30257;
int ptidx_30257;
int tidy_30260;
int ptidy_30260;
double* reserve_shared_30263;
double* preserve_shared_30263;
int _30271;
int p_30271;
int _30324;
int p_30324;
int _30387;
int p_30387;
double sum_30389;
double psum_30389;
int _30329;
int p_30329;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimx_30245 = blockDim_x();
pbdimx_30245 = bdimx_30245;
l30243: ;
bdimx_30245 = pbdimx_30245;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bdimy_30248 = blockDim_y();
pbdimy_30248 = bdimy_30248;
l30246: ;
bdimy_30248 = pbdimy_30248;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidx_30251 = blockIdx_x();
pbidx_30251 = bidx_30251;
l30249: ;
bidx_30251 = pbidx_30251;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
bidy_30254 = blockIdx_y();
pbidy_30254 = bidy_30254;
l30252: ;
bidy_30254 = pbidy_30254;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidx_30257 = threadIdx_x();
ptidx_30257 = tidx_30257;
l30255: ;
tidx_30257 = ptidx_30257;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
tidy_30260 = threadIdx_y();
ptidy_30260 = tidy_30260;
l30258: ;
tidy_30260 = ptidy_30260;
#line 201 "gpu_device_shm.impala"
__shared__ double reserver_reserve_shared_30263[938];
preserve_shared_30263 = reserver_reserve_shared_30263;
l30261: ;
reserve_shared_30263 = preserve_shared_30263;
#line 187 "gpu_device_shm.impala"
int _30284;
_30284 = bidx_30251 * bdimx_30245;
#line 11 "main.impala"
int _30298;
_30298 = _25549_30238.e3;
#line 187 "gpu_device_shm.impala"
int gid_x_30285;
gid_x_30285 = _30284 + tidx_30257;
#line 6 "gaussian.impala"
int _30264;
_30264 = _25550_30239.e2;
#line 191 "gpu_device_shm.impala"
int _30273;
_30273 = _25550_30239.e1;
#line 189 "gpu_device_shm.impala"
int _30293;
_30293 = bidy_30254 * bdimy_30248;
#line 11 "main.impala"
int _30290;
_30290 = _25549_30238.e2;
#line 189 "gpu_device_shm.impala"
int gid_y_30294;
gid_y_30294 = _30293 + tidy_30260;
#line 191 "gpu_device_shm.impala"
int extend_width_30274;
extend_width_30274 = _30273 / 2;
#line 6 "gaussian.impala"
int v_anchor_30265;
v_anchor_30265 = _30264 / 2;
#line 195 "gpu_device_shm.impala"
int _30275;
_30275 = 2 * extend_width_30274;
#line 197 "gpu_device_shm.impala"
int _30266;
_30266 = 2 * v_anchor_30265;
#line 195 "gpu_device_shm.impala"
int shm_dimx_30276;
shm_dimx_30276 = bdimx_30245 + _30275;
#line 197 "gpu_device_shm.impala"
int shm_dimy_30267;
shm_dimy_30267 = bdimy_30248 + _30266;
#line 38 "gpu_device_shm.impala"
bool _30268;
_30268 = 0 < shm_dimy_30267;
#line 38 "gpu_device_shm.impala"
if (_30268) goto l30269; else goto l30438;
l30438: ;
#line 231 "gpu_device_shm.impala"
goto l30373;
l30269: ;
#line 226 "gpu_device_shm.impala"
int _30309;
_30309 = tidy_30260 * shm_dimx_30276;
#line 219 "gpu_device_shm.impala"
int _30286;
_30286 = gid_x_30285 - extend_width_30274;
#line 221 "gpu_device_shm.impala"
int _30295;
_30295 = gid_y_30294 - v_anchor_30265;
#line 217 "gpu_device_shm.impala"
bool _30282;
_30282 = tidy_30260 < shm_dimy_30267;
#line 224 "gpu_device_shm.impala"
bool _30296;
_30296 = 0 <= _30295;
#line 224 "gpu_device_shm.impala"
bool _30299;
_30299 = _30295 < _30298;
#line 227 "gpu_device_shm.impala"
int _30304;
_30304 = _30295 * _30290;
#line 38 "gpu_device_shm.impala"
p_30271 = 0;
goto l30270;
l30270: ;
_30271 = p_30271;
#line 38 "gpu_device_shm.impala"
bool _30277;
_30277 = _30271 < shm_dimx_30276;
#line 38 "gpu_device_shm.impala"
if (_30277) goto l30278; else goto l30322;
l30322: ;
#line 38 "gpu_device_shm.impala"
p_30324 = bdimy_30248;
goto l30323;
l30323: ;
_30324 = p_30324;
#line 38 "gpu_device_shm.impala"
bool _30326;
_30326 = _30324 < shm_dimy_30267;
#line 38 "gpu_device_shm.impala"
if (_30326) goto l30327; else goto l30372;
l30372: ;
#line 231 "gpu_device_shm.impala"
goto l30373;
l30373: ;
#line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala"
__syncthreads();
l30375: ;
#line 237 "gpu_device_shm.impala"
bool _30377;
_30377 = gid_x_30285 < _30290;
#line 237 "gpu_device_shm.impala"
if (_30377) goto l30378; else goto l30437;
l30437: ;
#line 240 "gpu_device_shm.impala"
goto l30436;
l30378: ;
#line 237 "gpu_device_shm.impala"
bool _30379;
_30379 = gid_y_30294 < _30298;
#line 237 "gpu_device_shm.impala"
if (_30379) goto l30380; else goto l30435;
l30435: ;
#line 240 "gpu_device_shm.impala"
goto l30436;
l30436: ;
return ;
l30380: ;
#line 204 "gpu_device_shm.impala"
int _30402;
_30402 = v_anchor_30265 - _30293;
#line 64 "gpu_device_shm.impala"
char* _30416;
_30416 = _25553_30242.e1;
#line 203 "gpu_device_shm.impala"
int _30406;
_30406 = extend_width_30274 - _30284;
#line 64 "gpu_device_shm.impala"
int _30418;
_30418 = gid_y_30294 * _30290;
#line 64 "gpu_device_shm.impala"
int _30419;
_30419 = _30418 + gid_x_30285;
#line 64 "gpu_device_shm.impala"
double* _30417;
union { double* dst; char* src; } u_30417;
u_30417.src = _30416;
_30417 = u_30417.dst;
#line 39 "gaussian.impala"
bool _30381;
_30381 = v_anchor_30265 <= gid_y_30294;
#line 64 "gpu_device_shm.impala"
double* _30420;
_30420 = _30417 + _30419;
#line 39 "gaussian.impala"
if (_30381) goto l30382; else goto l30434;
l30434: ;
#line 49 "gaussian.impala"
goto l30424;
l30382: ;
#line 39 "gaussian.impala"
int _30383;
_30383 = _30298 - v_anchor_30265;
#line 39 "gaussian.impala"
bool _30384;
_30384 = gid_y_30294 < _30383;
#line 39 "gaussian.impala"
if (_30384) goto l30385; else goto l30423;
l30423: ;
#line 49 "gaussian.impala"
goto l30424;
l30424: ;
#line 72 "gpu_device_shm.impala"
int _30425;
_30425 = gid_y_30294 + _30402;
#line 72 "gpu_device_shm.impala"
int _30426;
_30426 = _30425 * shm_dimx_30276;
#line 72 "gpu_device_shm.impala"
int _30427;
_30427 = _30426 + gid_x_30285;
#line 72 "gpu_device_shm.impala"
int _30428;
_30428 = _30427 + _30406;
#line 72 "gpu_device_shm.impala"
double* _30429;
_30429 = reserve_shared_30263 + _30428;
#line 72 "gpu_device_shm.impala"
double _30430;
_30430 = *_30429;
#line 72 "gpu_device_shm.impala"
double _30432;
_30432 = _30430;
#line 64 "gpu_device_shm.impala"
*_30420 = _30432;
return ;
l30385: ;
#line 41 "gaussian.impala"
int _30422;
_30422 = 0 - v_anchor_30265;
#line 77 "gpu_device_shm.impala"
struct_Buffer_6327 _30394;
_30394 = _25550_30239.e0;
#line 41 "gaussian.impala"
int _30390;
_30390 = 1 + v_anchor_30265;
#line 77 "gpu_device_shm.impala"
char* _30395;
_30395 = _30394.e1;
#line 77 "gpu_device_shm.impala"
double* _30396;
union { double* dst; char* src; } u_30396;
u_30396.src = _30395;
_30396 = u_30396.dst;
#line 27 "gpu_device_shm.impala"
p_30387 = _30422;
psum_30389 = 0.000000e+00;
goto l30386;
l30386: ;
_30387 = p_30387;
sum_30389 = psum_30389;
#line 27 "gpu_device_shm.impala"
bool _30391;
_30391 = _30387 < _30390;
#line 27 "gpu_device_shm.impala"
if (_30391) goto l30392; else goto l30415;
l30415: ;
#line 64 "gpu_device_shm.impala"
*_30420 = sum_30389;
return ;
l30392: ;
#line 43 "gaussian.impala"
int _30401;
_30401 = gid_y_30294 + _30387;
#line 43 "gaussian.impala"
int _30397;
_30397 = _30387 + v_anchor_30265;
#line 31 "gpu_device_shm.impala"
int _30393;
_30393 = 1 + _30387;
#line 72 "gpu_device_shm.impala"
int _30403;
_30403 = _30401 + _30402;
#line 76 "gpu_device_shm.impala"
double* i_30398;
i_30398 = _30396 + _30397;
#line 72 "gpu_device_shm.impala"
int _30404;
_30404 = _30403 * shm_dimx_30276;
#line 77 "gpu_device_shm.impala"
double _30399;
_30399 = *i_30398;
#line 72 "gpu_device_shm.impala"
int _30405;
_30405 = _30404 + gid_x_30285;
#line 77 "gpu_device_shm.impala"
double _30411;
_30411 = _30399;
#line 72 "gpu_device_shm.impala"
int _30407;
_30407 = _30405 + _30406;
#line 72 "gpu_device_shm.impala"
double* _30408;
_30408 = reserve_shared_30263 + _30407;
#line 72 "gpu_device_shm.impala"
double _30409;
_30409 = *_30408;
#line 72 "gpu_device_shm.impala"
double _30412;
_30412 = _30409;
#line 43 "gaussian.impala"
double _30413;
_30413 = _30411 * _30412;
#line 43 "gaussian.impala"
double _30414;
_30414 = sum_30389 + _30413;
#line 27 "gpu_device_shm.impala"
p_30387 = _30393;
psum_30389 = _30414;
goto l30386;
l30327: ;
#line 221 "gpu_device_shm.impala"
int img_index_y_30344;
img_index_y_30344 = _30295 + _30324;
#line 224 "gpu_device_shm.impala"
bool _30347;
_30347 = img_index_y_30344 < _30298;
#line 224 "gpu_device_shm.impala"
bool _30345;
_30345 = 0 <= img_index_y_30344;
#line 214 "gpu_device_shm.impala"
int shm_index_y_30336;
shm_index_y_30336 = tidy_30260 + _30324;
#line 227 "gpu_device_shm.impala"
int _30352;
_30352 = img_index_y_30344 * _30290;
#line 217 "gpu_device_shm.impala"
bool _30337;
_30337 = shm_index_y_30336 < shm_dimy_30267;
#line 226 "gpu_device_shm.impala"
int _30357;
_30357 = shm_index_y_30336 * shm_dimx_30276;
#line 38 "gpu_device_shm.impala"
p_30329 = 0;
goto l30328;
l30328: ;
_30329 = p_30329;
#line 38 "gpu_device_shm.impala"
bool _30331;
_30331 = _30329 < shm_dimx_30276;
#line 38 "gpu_device_shm.impala"
if (_30331) goto l30332; else goto l30370;
l30370: ;
#line 42 "gpu_device_shm.impala"
int _30371;
_30371 = _30324 + bdimy_30248;
#line 38 "gpu_device_shm.impala"
p_30324 = _30371;
goto l30323;
l30332: ;
#line 212 "gpu_device_shm.impala"
int shm_index_x_30333;
shm_index_x_30333 = tidx_30257 + _30329;
#line 217 "gpu_device_shm.impala"
bool _30334;
_30334 = shm_index_x_30333 < shm_dimx_30276;
#line 217 "gpu_device_shm.impala"
if (_30334) goto l30335; else goto l30369;
l30369: ;
#line 229 "gpu_device_shm.impala"
goto l30368;
l30335: ;
#line 217 "gpu_device_shm.impala"
if (_30337) goto l30338; else goto l30367;
l30367: ;
#line 229 "gpu_device_shm.impala"
goto l30368;
l30368: ;
#line 40 "gpu_device_shm.impala"
goto l30349;
l30338: ;
#line 219 "gpu_device_shm.impala"
int img_index_x_30339;
img_index_x_30339 = _30286 + _30329;
#line 224 "gpu_device_shm.impala"
bool _30340;
_30340 = 0 <= img_index_x_30339;
#line 224 "gpu_device_shm.impala"
if (_30340) goto l30341; else goto l30366;
l30366: ;
#line 228 "gpu_device_shm.impala"
goto l30363;
l30341: ;
#line 224 "gpu_device_shm.impala"
bool _30342;
_30342 = img_index_x_30339 < _30290;
#line 224 "gpu_device_shm.impala"
if (_30342) goto l30343; else goto l30365;
l30365: ;
#line 228 "gpu_device_shm.impala"
goto l30363;
l30343: ;
#line 224 "gpu_device_shm.impala"
if (_30345) goto l30346; else goto l30364;
l30364: ;
#line 228 "gpu_device_shm.impala"
goto l30363;
l30346: ;
#line 224 "gpu_device_shm.impala"
if (_30347) goto l30348; else goto l30362;
l30362: ;
#line 228 "gpu_device_shm.impala"
goto l30363;
l30363: ;
#line 40 "gpu_device_shm.impala"
goto l30349;
l30348: ;
#line 227 "gpu_device_shm.impala"
int _30353;
_30353 = _30352 + img_index_x_30339;
#line 227 "gpu_device_shm.impala"
double* _30354;
_30354 = _25551_30240 + _30353;
#line 227 "gpu_device_shm.impala"
double _30355;
_30355 = *_30354;
#line 226 "gpu_device_shm.impala"
int _30358;
_30358 = _30357 + shm_index_x_30333;
#line 227 "gpu_device_shm.impala"
double _30360;
_30360 = _30355;
#line 226 "gpu_device_shm.impala"
double* _30359;
_30359 = reserve_shared_30263 + _30358;
#line 226 "gpu_device_shm.impala"
*_30359 = _30360;
#line 40 "gpu_device_shm.impala"
goto l30349;
l30349: ;
#line 42 "gpu_device_shm.impala"
int _30351;
_30351 = _30329 + bdimx_30245;
#line 38 "gpu_device_shm.impala"
p_30329 = _30351;
goto l30328;
l30278: ;
#line 212 "gpu_device_shm.impala"
int shm_index_x_30279;
shm_index_x_30279 = tidx_30257 + _30271;
#line 217 "gpu_device_shm.impala"
bool _30280;
_30280 = shm_index_x_30279 < shm_dimx_30276;
#line 217 "gpu_device_shm.impala"
if (_30280) goto l30281; else goto l30321;
l30321: ;
#line 229 "gpu_device_shm.impala"
goto l30320;
l30281: ;
#line 217 "gpu_device_shm.impala"
if (_30282) goto l30283; else goto l30319;
l30319: ;
#line 229 "gpu_device_shm.impala"
goto l30320;
l30320: ;
#line 40 "gpu_device_shm.impala"
goto l30301;
l30283: ;
#line 219 "gpu_device_shm.impala"
int img_index_x_30287;
img_index_x_30287 = _30286 + _30271;
#line 224 "gpu_device_shm.impala"
bool _30288;
_30288 = 0 <= img_index_x_30287;
#line 224 "gpu_device_shm.impala"
if (_30288) goto l30289; else goto l30318;
l30318: ;
#line 228 "gpu_device_shm.impala"
goto l30315;
l30289: ;
#line 224 "gpu_device_shm.impala"
bool _30291;
_30291 = img_index_x_30287 < _30290;
#line 224 "gpu_device_shm.impala"
if (_30291) goto l30292; else goto l30317;
l30317: ;
#line 228 "gpu_device_shm.impala"
goto l30315;
l30292: ;
#line 224 "gpu_device_shm.impala"
if (_30296) goto l30297; else goto l30316;
l30316: ;
#line 228 "gpu_device_shm.impala"
goto l30315;
l30297: ;
#line 224 "gpu_device_shm.impala"
if (_30299) goto l30300; else goto l30314;
l30314: ;
#line 228 "gpu_device_shm.impala"
goto l30315;
l30315: ;
#line 40 "gpu_device_shm.impala"
goto l30301;
l30300: ;
#line 226 "gpu_device_shm.impala"
int _30310;
_30310 = _30309 + shm_index_x_30279;
#line 227 "gpu_device_shm.impala"
int _30305;
_30305 = _30304 + img_index_x_30287;
#line 226 "gpu_device_shm.impala"
double* _30311;
_30311 = reserve_shared_30263 + _30310;
#line 227 "gpu_device_shm.impala"
double* _30306;
_30306 = _25551_30240 + _30305;
#line 227 "gpu_device_shm.impala"
double _30307;
_30307 = *_30306;
#line 227 "gpu_device_shm.impala"
double _30312;
_30312 = _30307;
#line 226 "gpu_device_shm.impala"
*_30311 = _30312;
#line 40 "gpu_device_shm.impala"
goto l30301;
l30301: ;
#line 42 "gpu_device_shm.impala"
int _30303;
_30303 = _30271 + bdimx_30245;
#line 38 "gpu_device_shm.impala"
p_30271 = _30303;
goto l30270;
}
} |
6,249 | //
// Created by alex on 7/16/20.
//
#include <cstdio>
#include <arpa/inet.h>
#include <iostream>
#include "udp_transport.cuh"
// Builds a UDP multicast transport.
// SENSOR role: configures the socket for outbound multicast — loopback
//   disabled, egress forced through localAddr — so push() can send.
// PROCESSOR role: enables address reuse, binds the local port, and joins
//   the multicast group mcastAddr on interface localAddr so pop() receives.
// Any setup failure logs to stderr and terminates the process.
UdpTransport::UdpTransport(string localAddr, string mcastAddr, eTransportRole role) {
    s_localAddr = localAddr;
    s_mcastAddr = mcastAddr;
    n_mcastPort = 6655; //TODO: does this matter?
    n_localPort = 6655; //TODO: does this matter?
    bzero( &g_localAddr, sizeof( g_localAddr ) );
    bzero( &g_mcastAddr, sizeof( g_mcastAddr ) );
    // Creating socket file descriptor
    if ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
        cerr << "ERROR UdpTransport - Failed to create socket " << errno << endl;
        exit(EXIT_FAILURE);
    }
    cout << "Created local UDP socket: " << sockfd << endl;
    if(role == eTransportRole::SENSOR)
    {
        // Destination for push(): the multicast group address and port.
        memset((char *) &g_mcastAddr, 0, sizeof(g_mcastAddr));
        g_mcastAddr.sin_family = AF_INET;
        g_mcastAddr.sin_addr.s_addr = inet_addr(s_mcastAddr.c_str());
        g_mcastAddr.sin_port = htons(n_mcastPort);
        // Don't loop our own datagrams back to this host.
        char loopch=0;
        if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,
                       (char *)&loopch,
                       sizeof(loopch)) < 0) {
            cerr << "ERROR UdpTransport - disable loop back failed set opt " << errno << endl;
            close(sockfd);
            exit(EXIT_FAILURE);
        }
        // Route outgoing multicast through the requested local interface.
        inet_pton(AF_INET, s_localAddr.c_str(), &this->g_localAddr.sin_addr);
        if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_IF,
                       (char *)&g_localAddr,
                       sizeof(g_localAddr)) < 0) {
            cerr << "ERROR UdpTransport - Setting up local interface " << strerror(errno) << endl;
            exit(1);
        }
    } else if(role == eTransportRole::PROCESSOR) {
        {
            // Allow several receivers to share the multicast port.
            int reuse=1;
            if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
                           (char *)&reuse, sizeof(reuse)) < 0) {
                cerr << "ERROR UdpTransport - set option to allow socket reuse " << errno << endl;
                close(sockfd);
                exit(EXIT_FAILURE);
            }
        }
        //Create the SockAddr for the Local System
        memset(&g_localAddr, 0, sizeof(g_localAddr));
        g_localAddr.sin_family = AF_INET;
        g_localAddr.sin_port = htons(n_localPort);
        // NOTE(review): binds all interfaces despite s_localAddr — confirm intended.
        g_localAddr.sin_addr.s_addr = INADDR_ANY;
        cout << "Bind the socket local address: " << s_localAddr << endl;
        if (bind(sockfd, (const struct sockaddr *) &g_localAddr,
                 sizeof(g_localAddr)) < 0) {
            cerr << "ERROR UdpTransport - failed to bind to local socket " << errno << endl;
            exit(EXIT_FAILURE);
        }
        // Join the multicast group on the chosen local interface.
        if (inet_pton(AF_INET, s_mcastAddr.c_str(), &mcastGroup.imr_multiaddr.s_addr) != 1) {
            cerr << "ERROR inet pton can't convert address " << errno << endl;
            close(sockfd);
            exit(EXIT_FAILURE);
        }
        if (inet_pton(AF_INET, s_localAddr.c_str(), &mcastGroup.imr_interface.s_addr) != 1) {
            cerr << "ERROR inet pton can't convert address " << errno << endl;
            close(sockfd);
            exit(EXIT_FAILURE);
        }
        if (setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                       (char *)&mcastGroup, sizeof(mcastGroup)) < 0) {
            cerr << "ERROR UdpTransport - couldn't join mcast group " << errno << endl;
            close(sockfd);
            exit(EXIT_FAILURE);
        }
    }
}
// Sends the message payload as a single datagram to the configured
// multicast group (g_mcastAddr). Terminates the process on a send error.
// Always returns 0 on success.
int UdpTransport::push(Message* m)
{
    // sendto() returns -1 on error; 0 is a legal result for an empty
    // datagram. The original tested `<= 0`, which would treat a valid
    // zero-byte send as fatal — only negative results are errors.
    if(sendto(sockfd, (const char *)m->buffer, m->bufferSize, 0,
              (const struct sockaddr *) &this->g_mcastAddr,
              sizeof(this->g_mcastAddr)) < 0)
    {
        cerr << "ERROR UdpTransport Push - failed sendto operation " << errno << endl;
        close(sockfd);
        exit(EXIT_FAILURE);
    }
    DEBUG("To " << inet_ntoa(g_mcastAddr.sin_addr) << endl);
#ifdef DEBUG_BUILD
    printMessage(m, 32);
#endif
    return 0;
}
/*
 * Pulls up to numReqMsg messages from the transport. Each received datagram
 * is copied into a freshly allocated Message (caller frees via freeMessage);
 * numRetMsg is incremented once per message received. A failed socket read
 * terminates the process. Returns 0.
 */
int UdpTransport::pop(Message** m, int numReqMsg, int& numRetMsg, eTransportDest dest)
{
    uint8_t buffer[MSG_MAX_SIZE]; // receive buffer
    int recvlen;                  // num bytes received
    DEBUG("waiting on socket " << this->n_localPort << endl);
    for(int i = 0; i < numReqMsg; i++)
    {
        // BUG FIX: the receive size was hard-coded to 1000, silently
        // truncating datagrams larger than 1000 bytes; use the buffer size.
        recvlen = recv(this->sockfd, buffer, MSG_MAX_SIZE, 0);
        if (recvlen > 0) {
            m[i] = createMessage();
            m[i]->seqNumber = i;
            m[i]->interval = 0;
            m[i]->bufferSize = recvlen;
            // BUG FIX: the memcpy arguments were reversed, copying from the
            // uninitialized message into the local scratch buffer. Copy the
            // received bytes into the message. TODO: smarter way than a copy?
            memcpy(m[i]->buffer, buffer, recvlen);
            numRetMsg = numRetMsg + 1; // note: caller-provided value is accumulated, not reset
        } else if(recvlen == -1) {
            cerr << "ERROR UdpTransport Pop - failed mcast socket read " << errno << endl;
            close(sockfd);
            exit(EXIT_FAILURE);
        }
    }
    return 0;
}
// Allocates an uninitialized Message on the heap; ownership passes to the
// caller, who releases it with freeMessage(). May return NULL on allocation
// failure (malloc semantics).
Message* UdpTransport::createMessage() {
    return static_cast<Message*>(malloc(sizeof(Message)));
}
// Releases a Message previously obtained from createMessage(). Returns 0.
int UdpTransport::freeMessage(Message* m)
{
free(m);
return 0;
} |
6,250 | #include "includes.h"
// Element-wise swap of two value arrays and their companion position arrays
// using the XOR-swap trick. One thread per element, 1-D launch, guarded by n.
// NOTE(review): XOR swap zeroes both entries if in and out (or in_pos and
// out_pos) alias the *same* memory for an element -- assumes the array pairs
// never alias; confirm at call sites.
__global__ void swap(unsigned int *in, unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
// XOR swap: in[i] <-> out[i]
in[i] = in[i] ^ out[i];
out[i] = in[i] ^ out[i];
in[i] = in[i] ^ out[i];
// XOR swap: in_pos[i] <-> out_pos[i]
in_pos[i] = in_pos[i] ^ out_pos[i];
out_pos[i] = in_pos[i] ^ out_pos[i];
in_pos[i] = in_pos[i] ^ out_pos[i];
}
} |
6,251 | #include "includes.h"
// Copies a width x height image into a (width+2*border) x (height+2*border)
// output, filling the border by mirror-reflecting the source across its
// edges. One thread per output pixel (2-D launch); borderWidth/borderHeight
// are the output dimensions.
__global__ void mirrorImage_kernel(uint width, uint height, uint border, uint borderWidth, uint borderHeight, float* devInput, float* devOutput) {
int x0 = blockDim.x * blockIdx.x + threadIdx.x;
int y0 = blockDim.y * blockIdx.y + threadIdx.y;
if ((x0 < borderWidth) && (y0 < borderHeight)) {
// (x1, y1) = mirrored source coordinate for output pixel (x0, y0)
int x1 = 0;
int y1 = 0;
if (x0 < border) {
x1 = border - x0 - 1;            // left border: reflect
} else if (x0 < border + width) {
x1 = x0 - border;                // interior: direct copy
} else {
x1 = border + 2 * width - x0 - 1; // right border: reflect
}
if (y0 < border) {
y1 = border - y0 - 1;            // top border: reflect
} else if (y0 < border + height) {
y1 = y0 - border;                // interior
} else {
y1 = border + 2 * height - y0 - 1; // bottom border: reflect
}
devOutput[y0 * borderWidth + x0] = devInput[y1 * width + x1];
}
} |
6,252 | #include "includes.h"
// Element-wise running min/max accumulation: for each i, folds input[i] into
// mins[i] and maxes[i]. Index is flattened over a 2-D grid of 1-D blocks.
// mins/maxes must be pre-initialized by the caller (this kernel only folds).
__global__ void GaussianMinMaxField(float* input, int inputCount, float* mins, float* maxes)
{
int i = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
+ blockDim.x * blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (i < inputCount)
{
mins[i] = fminf(mins[i], input[i]);
maxes[i] = fmaxf(maxes[i], input[i]);
}
} |
6,253 | #include "includes.h"
//CUDA reduction algorithm. simple approach
//Tom Dale
//11-20-18
using namespace std;
#define N 100000//number of input values
#define R 100//reduction factor
#define F (1+((N-1)/R))//how many values will be in the final output
//basicRun will F number of threads go through R number of values and put the average in z[tid]
// basicRun: F threads each average R consecutive values of a[] into z[tid].
// Expects a 1-D launch with at least F total threads; a has N elements and
// z has F elements.
__global__ void basicRun(double *a, double *z){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was `tid > F`, which let thread tid == F read
    // a[F*R .. F*R+R-1] past the end of the input and write z[F] out of
    // bounds. Valid output slots are 0 .. F-1, so reject tid >= F.
    if(tid >= F) return;
    double avg = 0;
    for(int i = 0; i < R; i++){ // sum of input values in this thread's domain
        avg += a[i + tid*R];
    }
    z[tid] = avg / R; // divide by the number of summed inputs to get the mean
} |
6,254 | #include <stdio.h>
#include <random>
#include <chrono>
#include <iostream>
// Radix-sort key transform: maps an IEEE-754 float bit pattern (passed as
// unsigned int) to an unsigned int whose unsigned ordering matches the float
// ordering. Non-negative floats get the sign bit set; negative floats are
// fully inverted. Inverted by floatFlipInverse().
__device__
unsigned int floatFlip(unsigned int value)
{
unsigned int mask = (-(value >> 31)) | 0x80000000; // all-ones if sign bit set, else just the sign bit
return value ^ mask;
}
// Inverse of floatFlip(): restores the original IEEE-754 bit pattern.
// A set top bit means the value was a non-negative float (clear just the
// sign bit); a clear top bit means it was negative (re-invert all bits).
__device__
unsigned int floatFlipInverse(unsigned int value)
{
    // CONSISTENCY FIX: mask was declared `int`, unlike floatFlip's
    // `unsigned int`. The bit pattern is identical, but the unsigned type
    // avoids the implicit signed/unsigned conversion in the XOR and keeps
    // the two transforms uniform.
    unsigned int mask = ((value >> 31) - 1) | 0x80000000;
    return value ^ mask;
}
// Applies floatFlip() in place to every element so the float array can be
// radix-sorted as unsigned integers. One thread per element, 1-D launch.
__global__
void convertFloats(unsigned int* vals, int num_elems)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < num_elems)
    {
        vals[idx] = floatFlip(vals[idx]);
    }
}
// Applies floatFlipInverse() in place to every element, restoring the
// original float bit patterns after sorting. One thread per element.
__global__
void invertFloats(unsigned int* vals, int num_elems)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if(idx < num_elems)
    {
        vals[idx] = floatFlipInverse(vals[idx]);
    }
}
// Extracts the 2-bit digit of each value at bit pair `dual_pos` (bits
// 2*dual_pos .. 2*dual_pos+1) and writes a one-hot uint4 flag for it:
// exactly one of flag.{x,y,z,w} is 1, selected by the digit 0..3.
__global__
void markFlags(unsigned int* vals, uint4* flags, int dual_pos, int num_elems)
{
int lid = threadIdx.x;
int gid = lid + (blockDim.x * blockIdx.x);
if(gid >= num_elems)
{
return;
}
unsigned int val = vals[gid];
unsigned int mask = 3 << (dual_pos * 2);
val &= mask;
val >>= (dual_pos * 2);      // val is now the 2-bit digit, 0..3
uint4 flag = {0,0,0,0};
*((&flag.x) + val) = 1;      // index into the uint4 fields as an array
flags[gid] = flag;
}
// Per-block Hillis-Steele scan of uint4 digit flags. Writes the exclusive
// scan to `out` and each block's inclusive total to block_totals[blockIdx.x].
// Requires dynamic shared memory of blockDim.x * sizeof(uint4).
// NOTE(review): threads with gid >= num_elems return before the
// __syncthreads() calls below; in a partially-full last block the remaining
// threads would hit barriers not reached by all threads and read an
// uninitialized shared-memory tail. In this file num_elems (1024*1024) is a
// multiple of the block size (1024) so the early return never fires --
// confirm before reusing with other sizes.
__global__
void scanExclusiveSumWithBlockTotals(uint4* in, uint4* out, uint4* block_totals, int num_elems)
{
extern __shared__ uint4 sh_vals[];
int lid = threadIdx.x;
int gid = lid + (blockDim.x * blockIdx.x);
if(gid >= num_elems)
{
return;
}
sh_vals[lid] = in[gid];
__syncthreads();
// Hillis-Steele inclusive scan: doubling stride, read-then-write separated
// by barriers to avoid races on sh_vals.
for(int offset = 1; offset < blockDim.x; offset <<= 1)
{
int left = lid - offset;
uint4 left_val;
if(left >= 0)
{
left_val = sh_vals[left];
}
__syncthreads();
if(left >= 0)
{
uint4 val = sh_vals[lid];
val.x += left_val.x;
val.y += left_val.y;
val.z += left_val.z;
val.w += left_val.w;
sh_vals[lid] = val;
}
__syncthreads();
}
// Convert to exclusive scan on output: slot 0 is zero, others shift by one.
if(lid == 0)
{
uint4 flag = {0,0,0,0};
out[gid] = flag;
}
else
{
out[gid] = sh_vals[lid - 1];
}
// Last thread of the block records the block's inclusive total.
if(lid == blockDim.x - 1 || gid == num_elems - 1)
{
block_totals[blockIdx.x] = sh_vals[lid];
}
}
// Single-level Hillis-Steele inclusive scan of uint4 values within each
// block (used here with a single block over the per-block totals, so the
// element count must not exceed the maximum block size). Requires dynamic
// shared memory of blockDim.x * sizeof(uint4).
// NOTE(review): as in scanExclusiveSumWithBlockTotals, the early return for
// gid >= num_elems skips the barriers; safe only when num_elems fills the
// block exactly, as it does for this file's launches -- confirm before reuse.
__global__
void scanInclusiveSum(uint4* in, uint4* out, int num_elems)
{
extern __shared__ uint4 sh_vals[];
int lid = threadIdx.x;
int gid = lid + (blockDim.x * blockIdx.x);
if(gid >= num_elems)
{
return;
}
sh_vals[lid] = in[gid];
__syncthreads();
for(int offset = 1; offset < blockDim.x; offset <<= 1)
{
int left = lid - offset;
uint4 left_val;
if(left >= 0)
{
left_val = sh_vals[left];
}
__syncthreads();
if(left >= 0)
{
uint4 val = sh_vals[lid];
val.x += left_val.x;
val.y += left_val.y;
val.z += left_val.z;
val.w += left_val.w;
sh_vals[lid] = val;
}
__syncthreads();
}
out[gid] = sh_vals[lid];
}
// Adds the inclusive-scanned totals of all preceding blocks to each element,
// turning the per-block exclusive scans into one global exclusive scan.
// block_totals must already hold the inclusive scan of per-block sums.
__global__
void addBlockTotals(uint4* fours, uint4* block_totals, int num_elems)
{
    int idx = threadIdx.x + (blockDim.x * blockIdx.x);
    // Block 0 has no preceding blocks; out-of-range threads do nothing.
    if(idx >= num_elems || blockIdx.x == 0)
    {
        return;
    }
    uint4 prior = block_totals[blockIdx.x - 1];
    uint4 cur = fours[idx];
    cur.x += prior.x;
    cur.y += prior.y;
    cur.z += prior.z;
    cur.w += prior.w;
    fours[idx] = cur;
}
// Stable scatter for one radix pass: each value's 2-bit digit at `dual_pos`
// selects its exclusive-scan rank (addresses) and its digit-bucket base
// offset (offsets); destination = rank + base. `flags` is unused here.
__global__
void scatterAddresses(unsigned int* vals_in,
unsigned int* vals_out,
uint4* flags,
uint4* addresses,
int dual_pos,
uint4 offsets,
int num_elems)
{
int lid = threadIdx.x;
int gid = lid + (blockDim.x * blockIdx.x);
if(gid >= num_elems)
{
return;
}
unsigned int val = vals_in[gid];
unsigned int mask = 3 << (dual_pos * 2);
unsigned int val_anded = val & mask;
val_anded >>= (dual_pos * 2);                       // the 2-bit digit, 0..3
uint4 addr_list = addresses[gid];
unsigned int addr = *((&addr_list.x) + val_anded);  // rank within the digit bucket
unsigned int offset = *((&offsets.x) + val_anded);  // bucket base offset
addr += offset;
vals_out[addr] = val;
}
// Sorts num_elems floats in place (host array) via a 2-bit-per-pass GPU
// radix sort: floats are bijectively mapped to order-preserving unsigned
// ints, sorted over 16 digit passes, then mapped back.
// NOTE: scanInclusiveSum is launched with one block of num_blocks threads,
// so num_blocks must not exceed the maximum block size (1024).
void radixSort(float* h_vals, int num_elems)
{
    int num_threads = 1024;
    int num_blocks = ceil(num_elems / ((float) num_threads));
    unsigned int* d_vals;
    unsigned int* d_vals_buffer;
    uint4* d_flags;
    uint4* d_addresses;
    uint4* d_block_totals;
    cudaMalloc(&d_vals, sizeof(unsigned int) * num_elems);
    cudaMalloc(&d_vals_buffer, sizeof(unsigned int) * num_elems);
    cudaMalloc(&d_flags, sizeof(uint4) * num_elems);
    cudaMalloc(&d_addresses, sizeof(uint4) * num_elems);
    cudaMalloc(&d_block_totals, sizeof(uint4) * num_blocks);
    cudaMemcpy(d_vals, h_vals, sizeof(unsigned int) * num_elems, cudaMemcpyHostToDevice);
    convertFloats<<<num_blocks, num_threads>>>(d_vals, num_elems);
    for(int bit_pair = 0; bit_pair < 16; bit_pair++)
    {
        // one-hot digit flags -> per-block exclusive scan -> global ranks
        markFlags<<<num_blocks, num_threads>>>(d_vals, d_flags, bit_pair, num_elems);
        scanExclusiveSumWithBlockTotals<<<num_blocks, num_threads, sizeof(uint4) * num_threads>>>(d_flags, d_addresses, d_block_totals, num_elems);
        scanInclusiveSum<<<1, num_blocks, sizeof(uint4) * num_blocks>>>(d_block_totals, d_block_totals, num_blocks);
        addBlockTotals<<<num_blocks, num_threads>>>(d_addresses, d_block_totals, num_elems);
        // digit-bucket base offsets from the grand totals of the last block
        uint4 totals = {0,0,0,0};
        cudaMemcpy(&totals, &d_block_totals[num_blocks - 1], sizeof(uint4), cudaMemcpyDeviceToHost);
        uint4 offsets = {0, totals.x, totals.x + totals.y, totals.x + totals.y + totals.z};
        scatterAddresses<<<num_blocks, num_threads>>>(d_vals, d_vals_buffer, d_flags, d_addresses, bit_pair, offsets, num_elems);
        // ping-pong the value buffers for the next pass
        unsigned int* temp = d_vals;
        d_vals = d_vals_buffer;
        d_vals_buffer = temp;
    }
    invertFloats<<<num_blocks, num_threads>>>(d_vals, num_elems);
    cudaMemcpy(h_vals, d_vals, sizeof(float) * num_elems, cudaMemcpyDeviceToHost);
    // BUG FIX: all five device allocations were leaked on every call.
    // (After the even number of swaps above, d_vals/d_vals_buffer cover both
    // original pointers either way.)
    cudaFree(d_vals);
    cudaFree(d_vals_buffer);
    cudaFree(d_flags);
    cudaFree(d_addresses);
    cudaFree(d_block_totals);
}
// Driver: fills a 1M-element array with random floats in [-500, 500),
// GPU-radix-sorts it, verifies ascending order, and prints the elapsed time.
int main()
{
    srand((int) time(NULL));
    int num_elems = 1024 * 1024;
    float* h_vals = (float*) malloc(sizeof(float) * num_elems);
    for(int i = 0; i < num_elems; i++)
    {
        h_vals[i] = (((float) rand() / RAND_MAX) * 1000) - 500;
    }
    auto start = std::chrono::high_resolution_clock::now();
    radixSort(h_vals, num_elems);
    auto end = std::chrono::high_resolution_clock::now();
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start) / 1000.0f;
    // Verify the sort: any descending adjacent pair is an error.
    for(int i = 1; i < num_elems; i++)
    {
        if(h_vals[i - 1] > h_vals[i])
        {
            printf("Error at index %d: %f > %f \n", i, h_vals[i - 1], h_vals[i]);
            break;
        }
    }
    std::cout << elapsed.count() << " milliseconds \n";
    free(h_vals); // BUG FIX: host buffer was leaked (also removed a dead, empty debug loop)
    return 0;
}
|
6,255 | __device__ int evalRamp() {
// Stub device helper: always returns the constant ramp value 400.
return 400;
} |
6,256 | #include "includes.h"
#define TILE_WIDTH 40
//-----------------------------------------------
//--------------------------------------------------
// Compute C = A * B
//-------------------------------------------------
// Computes C = A * B for row-major matrices: A is m x n, B is n x k and
// C is m x k. One thread per output element (2-D launch), bounds-guarded.
__global__ void MatrixMult(int m, int n, int k, double *a, double *b, double *c)
{
int row = threadIdx.y + blockIdx.y*blockDim.y;
int col = threadIdx.x + blockIdx.x*blockDim.x;
if((row < m) && (col < k))
{
double temp = 0.0;
// dot product of row `row` of A with column `col` of B
for (int i = 0; i < n; ++i)
{
temp += a[row*n+i]*b[col+i*k];
}
c[row*k+col] = temp;
}
} |
6,257 | #include <iostream>
#include <fstream>
#include <iomanip>
#include <cstring>
#include <cmath>
#include <stdlib.h>
#include<sys/time.h>
using namespace std;
//-----------------------DO NOT CHANGE NAMES, ONLY MODIFY VALUES--------------------------------------------
//Final Values that will be compared for correctness
//You may change the function prototypes and definitions, but you need to present final results in these arrays
//-----------------------------Structures for correctness check-------------------
int **SA_Final_student;
int **L_counts_student;
char *L_student;
int F_counts_student[]={0,0,0,0};
//--------------------------------------------------------------------------------
//----------------------------------------------------------------------------------------------------------
//-----------------------DO NOT CHANGE--------------------------------------------
int read_count = 0;
int read_length = 0;
int **SA_Final;
int **L_counts;
char *L;
int F_counts[]={0,0,0,0};
//Read file to get reads
// Reads all newline-terminated reads from file_path. Returns an array of
// `*read_count` heap-allocated strings; *length is set to the read length
// including the terminator position, and each read's trailing '\n' is
// replaced with the '$' sentinel.
// NOTE(review): fopen/getline results are unchecked -- a missing file
// crashes on fgetc(NULL). Assumes all reads share one length (taken from
// the first read); confirm for the input format.
char** inputReads(char *file_path, int *read_count, int *length){
FILE *read_file = fopen(file_path, "r");
int ch, lines=0;
char **reads;
// First pass: count lines to size the array.
do
{
ch = fgetc(read_file);
if (ch == '\n')
lines++;
} while (ch != EOF);
rewind(read_file);
reads=(char**)malloc(lines*sizeof(char*));
*read_count = lines;
int i = 0;
size_t len = 0;
// Second pass: getline allocates each read's buffer (POSIX).
for(i = 0; i < lines; i++)
{
reads[i] = NULL;
len = 0;
getline(&reads[i], &len, read_file);
}
fclose(read_file);
// Locate the newline of the first read to derive the common length.
int j=0;
while(reads[0][j]!='\n')
j++;
*length = j+1;
// Replace every read's newline with the '$' end-of-read sentinel.
for(i=0;i<lines;i++)
reads[i][j]='$';
return reads;
}
//Check correctness of values
// Compares the student-computed FM-index structures (L, SA, L_counts,
// F_counts) against the reference ones; returns 1 when everything matches,
// 0 otherwise.
int checker(){
    int ok = 1;
    for(int i = 0; i < read_count*read_length; i++){
        if(L_student[i] != L[i])
            ok = 0;
        for(int j = 0; j < 2; j++)
            if(SA_Final_student[i][j] != SA_Final[i][j])
                ok = 0;
        for(int j = 0; j < 4; j++)
            if(L_counts_student[i][j] != L_counts[i][j])
                ok = 0;
    }
    for(int i = 0; i < 4; i++)
        if(F_counts_student[i] != F_counts[i])
            ok = 0;
    return ok;
}
//Rotate read by 1 character
// Left-rotates `read` by one character into `rotatedRead` (length chars):
// rotatedRead = read[1..length-1] followed by read[0].
void rotateRead(char *read, char *rotatedRead, int length){
    rotatedRead[length-1] = read[0];
    for(int i = 1; i < length; i++)
        rotatedRead[i-1] = read[i];
}
//Generate Sufixes and their SA's for a read
// Builds all `length` cyclic rotations of `read`: rotation 0 is the read
// itself and rotation i is rotation i-1 shifted left by one character.
// Returns a heap-allocated array of heap-allocated, non-NUL-terminated
// buffers; caller owns all of them. (read_id is currently unused.)
char** generateSuffixes(char *read, int length, int read_id){
    char **rotations = (char**)malloc(length*sizeof(char*));
    rotations[0] = (char*)malloc(length*sizeof(char));
    memcpy(rotations[0], read, length*sizeof(char));
    for(int i = 1; i < length; i++){
        rotations[i] = (char*)malloc(length*sizeof(char));
        rotateRead(rotations[i-1], rotations[i], length);
    }
    return rotations;
}
//Comparator for Suffixes
// Three-way lexicographic comparison over exactly `length` characters:
// returns 1 if suffix1 > suffix2, -1 if smaller, 0 if equal.
int compSuffixes(char *suffix1, char *suffix2, int length){
    for(int i = 0; i < length; i++){
        if(suffix1[i] != suffix2[i])
            return (suffix1[i] > suffix2[i]) ? 1 : -1;
    }
    return 0;
}
//Calculates the final FM-Index
// Builds the FM-index over all reads' suffix rotations: gathers every
// rotation, bubble-sorts them lexicographically while permuting the suffix
// array SA_Final in lockstep, then derives the first-column counts
// (F_count), the last column (L), and the cumulative per-character counts
// of L (returned). Also allocates the global SA_Final.
int** makeFMIndex(char ***suffixes, int read_count, int read_length, int F_count[], char *L){
int i, j;
SA_Final=(int**)malloc(read_count*read_length*sizeof(int*));
for(i=0;i<read_count*read_length;i++)
SA_Final[i]=(int*)malloc(2*sizeof(int));
//Temporary storage for collecting together all suffixes
char **temp_suffixes=(char**)malloc(read_count*read_length*sizeof(char*));
//Initalization of temporary storage
for(i=0;i<read_count;i++){
for(j=0;j<read_length;j++){
temp_suffixes[i*read_length+j]=(char*)malloc(read_length*sizeof(char));
// NOTE(review): this memcpy copies read_length bytes starting at the
// ADDRESS of the pointer slots (&temp_suffixes[...] / &suffixes[i][j]),
// overwriting pointer values rather than copying string contents -- it
// looks like memcpy(temp_suffixes[...], suffixes[i][j], ...) was
// intended, and the malloc above is immediately leaked. Confirm against
// the reference output before changing.
memcpy(&temp_suffixes[i*read_length+j], &suffixes[i][j],read_length*sizeof(char));
SA_Final[i*read_length+j][0]=j;
SA_Final[i*read_length+j][1]=i;
}
}
char *temp=(char*)malloc(read_length*sizeof(char));
int **L_count=(int**)malloc(read_length*read_count*sizeof(int*));
for(i=0;i<read_length*read_count;i++){
L_count[i]=(int*)malloc(4*sizeof(int));
for(j=0;j<4;j++){
L_count[i][j]=0;
}
}
//Focus on improving this for evaluation purpose
//Sorting of suffixes: O(n^2) bubble sort; SA_Final rows are swapped in
//lockstep so entry i keeps describing sorted suffix i.
for(i=0;i<read_count*read_length-1;i++){
for(j=0;j<read_count*read_length-i-1;j++){
if(compSuffixes(temp_suffixes[j], temp_suffixes[j+1], read_length)>0){
memcpy(temp, temp_suffixes[j], read_length*sizeof(char));
memcpy(temp_suffixes[j], temp_suffixes[j+1], read_length*sizeof(char));
memcpy(temp_suffixes[j+1], temp, read_length*sizeof(char));
int temp_int = SA_Final[j][0];
SA_Final[j][0]=SA_Final[j+1][0];
SA_Final[j+1][0]=temp_int;
temp_int = SA_Final[j][1];
SA_Final[j][1]=SA_Final[j+1][1];
SA_Final[j+1][1]=temp_int;
}
}
}
free(temp);
char this_F = '$';
j=0;
//Calculation of F_count's: run lengths of the sorted first column.
for(i=0;i<read_count*read_length;i++){
int count=0;
while(temp_suffixes[i][0]==this_F){
count++;i++;
}
// NOTE(review): `F_count[j++]=j==0?...` reads and increments j in one
// statement -- the evaluation order is unspecified before C++17; the
// j==0 branch may never be taken. Confirm intended semantics.
F_count[j++]=j==0?count:count+1;
this_F = temp_suffixes[i][0];
if(temp_suffixes[i][0]=='T')
break;
}
//Calculation of L's and L_count's: last column plus cumulative A/C/G/T
//counts up to each row.
for(i=0;i<read_count*read_length;i++){
char ch = temp_suffixes[i][read_length-1];
L[i]=ch;
if(i>0){
for(int k=0;k<4;k++)
L_count[i][k]=L_count[i-1][k];
}
if(ch=='A')
L_count[i][0]++;
else if(ch=='C')
L_count[i][1]++;
else if(ch=='G')
L_count[i][2]++;
else if(ch=='T')
L_count[i][3]++;
}
return L_count;
}
//-----------------------DO NOT CHANGE--------------------------------------------
// Driver: reads the input reads file (argv[1]), runs the default FM-index
// construction with wall-clock timing, leaves hooks for a student
// implementation, and reports the speedup (0.0 while the correctness check
// is commented out).
int main(int argc, char *argv[]){
char **reads = inputReads(argv[1], &read_count, &read_length);//Input reads from file
char ***suffixes=(char***)malloc(read_count*sizeof(char**));//Storage for read-wise suffixes
//-----------------------------Structures for correctness check----------------------------------------------
L=(char*)malloc(read_count*read_length*sizeof(char*));//Final storage for last column of sorted suffixes
//-----------------------------Structures for correctness check----------------------------------------------
//-----------Default implementation----------------
//-----------Time capture start--------------------
struct timeval TimeValue_Start;
struct timeval TimeValue_Final;
struct timezone TimeZone_Start;
struct timezone TimeZone_Final;
long time_start, time_end;
double time_overhead_default, time_overhead_student;
gettimeofday(&TimeValue_Start, &TimeZone_Start);
//Generate read-wise suffixes
for(int i=0;i<read_count;i++){
suffixes[i]=generateSuffixes(reads[i], read_length, i);
}
//Calculate finl FM-Index
L_counts = makeFMIndex(suffixes, read_count, read_length, F_counts, L);
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_default = (time_end - time_start)/1000000.0;
//------------Time capture end----------------------
//--------------------------------------------------
//-----------Your implementations------------------
// NOTE(review): the student timing below reuses TimeValue_Start from the
// default run rather than re-capturing it, so time_overhead_student spans
// both phases -- confirm intended.
gettimeofday(&TimeValue_Final, &TimeZone_Final);
time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec;
//-----------Call your functions here--------------------
//-----------Call your functions here--------------------
time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec;
time_overhead_student = (time_end - time_start)/1000000.0;
//--------------------------------------------------
//----------------For debug purpose only-----------------
//for(int i=0;i<read_count*read_length;i++)
// cout<<L[i]<<"\t"<<SA_Final[i][0]<<","<<SA_Final[i][1]<<"\t"<<L_counts[i][0]<<","<<L_counts[i][1]<<","<<L_counts[i][2]<<","<<L_counts[i][3]<<endl;
//--------------------------------------------------
//---------------Correction check and speedup calculation----------------------
float speedup=0.0;
//if(checker()==1)
// speedup = time_overhead_default/time_overhead_student;
cout<<"Speedup="<<speedup<<endl;
//-----------------------------------------------------------------------------
return 0;
}
|
6,258 | #include <cuda.h>
#include <iostream>
#define nPerThread 16
using namespace std;
/* Synchronization
* - Synchronize threads in a block
*/
// Each of blockDim.x threads initializes nPerThread strided elements of
// `data`, then increments elements in reverse index order. `data` must hold
// n = blockDim.x * nPerThread doubles; single-block launch assumed (there is
// no cross-block synchronization).
__global__ void myKernel(int n, double *data) {
    int t = threadIdx.x;
    int nt = blockDim.x;
    // initialize values
    for (int i = 0; i < nPerThread; i++)
        data[nt*i+t] = double(nt*i+t);
    // BUG FIX: the barrier requested by the "(*) synchronize threads here"
    // marker was missing. The reverse-order pass below reads/increments
    // elements written by OTHER threads, so all initialization writes must
    // be visible before it starts.
    __syncthreads();
    // increment values with inverse order
    for (int i = 0; i < nPerThread; i++)
        data[n-(nt*i+t)-1] += 1.0;
}
// Driver for the synchronization exercise: zero-fills n doubles, runs
// myKernel with one block of 512 threads, and prints the last element
// (expected nt*nPerThread-1 + 1.0 once the kernel synchronizes correctly).
int main() {
int nBlocks = 1;
int nThreads = 512;
int n = nThreads * nPerThread;
double *data = (double*) malloc(n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = 0;
}
double *data_dev;
cudaMalloc((void**) &data_dev, n * sizeof(double));
cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
myKernel <<< nBlocks, nThreads >>>(n, data_dev);
// blocking copy-back also synchronizes with the kernel
cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
cudaFree(data_dev);
cout << "data[n-1] = " << data[n-1] << endl;
free(data);
}
|
6,259 | #include "includes.h"
// Column-wise numerically-stabilized softmax over a rows x cols matrix `b`
// (column-major access b[i*cols + bid]); block bid handles column bid and
// the result is written to softmaxP. Requires dynamic shared memory of at
// least rows floats.
// NOTE(review): every thread whose guard passes redundantly performs the
// whole column computation and writes the same shared/global locations with
// no barriers between the phases -- looks like it is meant for a single
// thread per block (or needs a tid==0 guard plus __syncthreads). Confirm
// launch configuration before reuse.
__global__ void softmax_linear(float* softmaxP, float* b, int rows, int cols){
int tid = threadIdx.x;
int bid = blockIdx.x;
float _max = -100000000.0;
float sum = 0.0;
extern __shared__ float _share[];
if(tid * cols + bid < rows * cols){
for(int i = 0 ; i < rows ; i++) _share[i] = b[i * cols + bid]; // load column
for(int i = 0 ; i < rows ; i++) _max = max(_max, _share[i]);   // column max for stability
for(int i = 0 ; i < rows ; i++) _share[i] = __expf(_share[i]-_max);
for(int i = 0 ; i < rows ; i++) sum += _share[i];
for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] = _share[i]/sum; // normalize
}
} |
6,260 | //#include "crop_cuda.h"
//
//#include <stdio.h>
//#include <cstdlib>
//#include <math.h>
//#include <iostream>
//
//#include "../common/macro.h"
//
//#define PIXEL_PER_THREAD 128
//
//namespace va_cv {
//
//texture<unsigned char> tex_src;
//__constant__ int rect[5];
//
//
//
//__global__ void kernel_crop_grey(unsigned char *dst ) {
// // map from threadIdx/BlockIdx to pixel position(on dst)
// int tid = threadIdx.x + blockIdx.x * blockDim.x;
// while (tid < rect[2] * rect[3]) {
// int dst_x = tid % rect[2];
// int dst_y = tid / rect[2];
// int src_ofs = rect[4] * dst_y + dst_x;
// unsigned char c = tex1Dfetch(tex_src, src_ofs);
// dst[tid] = c;
//
// tid += blockDim.x * gridDim.x;
// }
//}
//
//void CropCuda::crop_cuda_grey_int8(const unsigned char *src, int src_width, int src_height,
// unsigned char *dst,
// int crop_left, int crop_top, int crop_width, int crop_height) {
// // crop rect, use const value
// int *rect_vec = new int[5]{crop_left, crop_top, crop_width, crop_height, src_width};
// cudaMemcpyToSymbol( rect, rect_vec, sizeof(int) * 5);
//
//
// int dst_size = crop_width * crop_height;
// int src_size = src_width * src_height;
// // dst使用cuda malloc
// unsigned char *dev_src, *dev_dst;
// cudaMalloc( (void**)&dev_dst, dst_size * sizeof(unsigned char) ) ;
// cudaMalloc( (void**)&dev_src, src_size * sizeof(unsigned char) ) ;
// cudaMemcpy( dev_src, src, src_size * sizeof(unsigned char), cudaMemcpyHostToDevice );
//
// // src使用紋理內存
// int err = cudaBindTexture( NULL, tex_src, dev_src, src_size );
// if (err != cudaSuccess) {
// printf("bind failed!!! %d\n", err);
// }
//
// // 設備函數
// dim3 blocks((dst_size + PIXEL_PER_THREAD - 1) / PIXEL_PER_THREAD);
// dim3 threads(PIXEL_PER_THREAD);
// kernel_crop_grey<<<blocks,threads>>>( dev_dst );
//
// // 讀取dst內存
// cudaMemcpy(dst, dev_dst, dst_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
//
// // 回收內存
// cudaFree(dev_dst);
// cudaFree(dev_src);
// cudaUnbindTexture( tex_src );
//
// delete[] rect_vec;
//}
//
//} |
6,261 |
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
//#include <cutil.h>
#define BLOCK_X 16
#define BLOCK_Y 16
// Writes 1.0 to one element per thread.
// NOTE(review): the linear index uses blockDim.x as the row pitch, not the
// image width -- with the launch in main() (grid height 1, so y == 0) every
// element 0..imDimX*imDimY-1 is still covered exactly once, but this would
// be wrong for a genuinely 2-D launch. No bounds guard either; the launch
// must match the allocation exactly.
__global__ void convolutionKernel( float *pSrcImg)
{
int x,
y;
x = threadIdx.x + blockDim.x * blockIdx.x;
y = threadIdx.y + blockDim.y * blockIdx.y;
pSrcImg[x + y*blockDim.x] = 1;
}
// Prints a diagnostic (source line number plus CUDA error string) whenever
// *pCudaError holds a failure code; silent on cudaSuccess.
void pce( cudaError_t *pCudaError,
          int lineNumber)
{
    if (*pCudaError)
    {
        printf( "cudaError at line %d:\n %s\n",
                lineNumber,
                cudaGetErrorString(*pCudaError));
    }
}
// Smoke test: launches convolutionKernel to set every pixel of a 16x16
// float image to 1, copies the result back, and reports pass/fail.
int main(int argc, char **argv) {
    float *pSrcImg,
          *pSrcImg_device;
    int imDimX = 16,
        imDimY = 16,
        i,
        testFailed = 0;
    cudaError_t
        cudaError;
    cudaSetDevice(0);
    // memory for the device
    cudaError = cudaMalloc( (void **) &pSrcImg_device,
                            imDimY*imDimX*sizeof(float));
    pce(&cudaError, __LINE__);
    // memory for the host (zero-initialized)
    pSrcImg = (float *)calloc(imDimY*imDimX, sizeof(float));
    //copy memory from host to device
    cudaError = cudaMemcpy(pSrcImg_device,
                           pSrcImg,
                           imDimY*imDimX*sizeof(float),
                           cudaMemcpyHostToDevice);
    pce(&cudaError, __LINE__);
    // one BLOCK_X-thread block per image row
    dim3 block( BLOCK_X,1);
    dim3 grid(imDimX*imDimY/BLOCK_X,1);
    convolutionKernel<<< grid, block >>>(pSrcImg_device);
    cudaError = cudaGetLastError();
    pce(&cudaError, __LINE__);
    cudaThreadSynchronize(); // NOTE: deprecated alias of cudaDeviceSynchronize
    cudaError = cudaGetLastError();
    pce(&cudaError, __LINE__);
    // copy results back
    cudaError = cudaMemcpy( pSrcImg,
                            pSrcImg_device,
                            imDimY*imDimX*sizeof(float),
                            cudaMemcpyDeviceToHost);
    pce(&cudaError, __LINE__);
    for (i = 0; i < imDimX*imDimY; i++)
    {
        if ((int)pSrcImg[i] != 1) testFailed = 1;
    }
    if (testFailed != 0)
    {
        printf("Test failed\n");
    }
    else
    {
        printf("Test passed\n");
    }
    // BUG FIX: release device and host memory. The original free calls were
    // commented out because they passed &ptr instead of ptr to
    // cudaFree/cudaFreeHost; pass the pointers directly.
    cudaError = cudaFree(pSrcImg_device);
    pce(&cudaError, __LINE__);
    free(pSrcImg);
    return cudaError;
}
|
6,262 | /**
* Copyright 2021 RICOS Co. Ltd.
*
* This file is a part of ricosjp/monolish,
* and distributed under Apache-2.0 License
* https://github.com/ricosjp/monolish
*/
#include "cuda_runtime.h"
#include <iostream>
// Prints the compute capability (major then minor, concatenated, e.g. "80")
// of the GPU whose index is given as argv[1]. Returns non-zero on bad
// arguments or CUDA API failure.
int main(int argc, char **argv) {
if (argc != 2) {
std::cout << "Usage: " << argv[0] << " [device number]" << std::endl;
return 1;
}
int device_number = std::stoi(argv[1]);
cudaError_t cudaStatus;
int count;
cudaStatus = cudaGetDeviceCount(&count);
if (cudaStatus != cudaSuccess) {
std::cerr << "CUDA API cudaGetDeviceCount failed" << cudaStatus << std::endl;
return cudaStatus;
}
if (device_number >= count) {
std::cerr << "Input device_number is larger than the number of GPU ("
<< device_number << " >= " << count << ")" << std::endl;
return 1;
}
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceProperties(&prop, device_number);
if (cudaStatus != cudaSuccess) {
std::cerr << "CUDA API cudaGetDeviceProperties failed" << std::endl;
return cudaStatus;
}
// major and minor are deliberately printed with no separator
std::cout << prop.major << prop.minor << std::endl;
return 0;
}
|
6,263 | // The dataset generator generates all the datasets into one single pair of input files.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
#include <math.h>
using namespace std;
// Rounds n to the nearest hundredth (two decimal places).
float truncs(float n)
{
    return roundf(n * 100) / 100;
}
// Verifies 10 vector-addition datasets: argv[1] is the expected-output
// file, argv[2]/argv[3] the two input files; sums are computed on the GPU
// via Thrust and compared after rounding to two decimals. An optional
// argv[4] receives the computed sums.
int main(int argc, char *argv[]) {
    if(argc < 4)
    {
        cout<<"Ensure that an output file and 2 input files are passed in as arguments when running this program\n";
        exit(0);
    }
    /* parse the input arguments */
    char file1[100], file2[100], file3[100],file4[100];
    strcpy(file3,argv[1]);   // expected-output (verification) file
    strcpy(file1,argv[2]);
    strcpy(file2,argv[3]);
    if(argc > 4)
        strcpy(file4,argv[4]);
    FILE *handle1 = fopen(file1, "r");
    FILE *handle2 = fopen(file2, "r");
    FILE *handle3 = fopen(file3,"r");
    FILE *handle4 = NULL;    // BUG FIX: was uninitialized when argc <= 4
    if(argc > 4) //To write into optional output file
    {
        handle4 = fopen(file4, "w");
    }
    for(int i = 0;i < 10; i++)
    {
        float *hostInput1 = NULL;
        float *hostInput2 = NULL;
        float *hostOutput = NULL;
        int inputLength;
        //Read size of vector (each file repeats the length header)
        fscanf(handle1, "%d", &inputLength);
        fscanf(handle2, "%d", &inputLength);
        fscanf(handle3, "%d", &inputLength);
        hostInput1 = (float*) malloc(inputLength*sizeof(float));
        hostInput2 = (float*) malloc(inputLength*sizeof(float));
        // Import host input data
        for (int ii = 0; ii < inputLength; ii++) {
            fscanf(handle1, "%f", &hostInput1[ii]);
            fscanf(handle2, "%f",&hostInput2[ii]);
        }
        // Host output buffer
        hostOutput = (float*) malloc(inputLength*sizeof(float));
        // Device vectors; construction performs the host-to-device copy
        thrust::device_vector<float> da(hostInput1,hostInput1+inputLength);
        thrust::device_vector<float> db(hostInput2,hostInput2+inputLength);
        thrust::device_vector<float> dc(hostOutput,hostOutput+inputLength);
        // Element-wise vector addition on the device
        thrust::transform(da.begin(), da.end(), db.begin(), dc.begin(), thrust::plus<float>());
        // Copy data back to host
        thrust::copy(dc.begin(), dc.end(), hostOutput);
        // Verifying results
        if(argc>4)
            fprintf(handle4, "%d", inputLength);
        int flag = 1;
        for(int j = 0; j < inputLength; j++)
        {
            float n;
            fscanf(handle3,"%f",&n);
            if(flag)
            {
                if(truncs(n) != truncs(hostOutput[j]))
                {
                    cout<<"Dataset "<<i<<" could not be verified\n";
                    flag = 0;
                }
            }
            if(argc>4)
                fprintf(handle4, "\n%f", hostOutput[j]);
        }
        if(flag)
            cout<<"Dataset "<<i<<" verified\n";
        cout<<endl;
        free(hostInput1);
        free(hostInput2);
        free(hostOutput);
    }
    if(argc > 4)
    {
        cout<<"Output written into file: "<<file4<<endl;
        fclose(handle4); // BUG FIX: optional output file was never closed/flushed
    }
    fclose(handle1);
    fclose(handle2);
    fclose(handle3);     // BUG FIX: verification file was never closed
    return 0;
}
|
6,264 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Prints the elapsed wall-clock time between start and stop, in microseconds.
__host__
void printtime(struct timeval *start,struct timeval *stop) {
long time=1000000*(stop->tv_sec-start->tv_sec)+stop->tv_usec-start->tv_usec;
printf("\nCUDA execution time=%ld microseconds\n",time);
}
// Timing skeleton: captures wall-clock time around a (to-be-added) CUDA
// workload and prints the elapsed microseconds.
int main(int argc,char **argv) {
struct timeval start,stop;
gettimeofday(&start,NULL);
// run your CUDA kernel(s) here
// synchronize/finalize your CUDA computations
gettimeofday(&stop,NULL);
printtime(&start,&stop);
}
|
6,265 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE 100000
extern "C" __device__ unsigned long long mulValue1List[LIST_SIZE];
extern "C" __device__ unsigned long long mulValue2List[LIST_SIZE];
extern "C" __device__ unsigned long long mulCountList[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;
// Clears the device-side record_flag symbol so device code stops recording.
void bambooLogRecordOff(){
    long long off = 0;
    cudaMemcpyToSymbol(record_flag, &off, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Enables device-side recording by setting record_flag to 1.
// NOTE(review): the parameter `i` is immediately overwritten with 1, so the
// caller's value is ignored -- confirm whether the parameter is vestigial.
void bambooLogKernelBegin(long long i) {
i = 1;
cudaMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Copies the device-side multiplication profile lists back to the host and
// writes every non-zero-count entry to profile_mul_value_result.txt as
// "index count value1 value2".
void bambooLogKernelEnd()
{
#ifdef KERNELTRACE
    cudaDeviceSynchronize();
#endif
    // BUG FIX: the three LIST_SIZE (100000) long long arrays (~2.4 MB total)
    // previously lived on the stack, risking stack overflow; allocate them
    // on the heap instead.
    long long *mulValue1ListLocal = (long long*)malloc(LIST_SIZE * sizeof(long long));
    long long *mulValue2ListLocal = (long long*)malloc(LIST_SIZE * sizeof(long long));
    long long *mulCountListLocal  = (long long*)malloc(LIST_SIZE * sizeof(long long));
    if(!mulValue1ListLocal || !mulValue2ListLocal || !mulCountListLocal){
        free(mulValue1ListLocal);
        free(mulValue2ListLocal);
        free(mulCountListLocal);
        return;
    }
    cudaMemcpyFromSymbol(mulValue1ListLocal, mulValue1List, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(mulValue2ListLocal, mulValue2List, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(mulCountListLocal, mulCountList, LIST_SIZE * sizeof(long long), 0, cudaMemcpyDeviceToHost);
    // BUG FIX: fopen result was previously used unchecked.
    FILE *profileFile = fopen("profile_mul_value_result.txt", "w");
    if(profileFile){
        for(long long i = 0; i < LIST_SIZE; i++){
            if(mulCountListLocal[i] != 0)
            {
                fprintf(profileFile, "%lld %lld %lld %lld\n", i, mulCountListLocal[i], mulValue1ListLocal[i], mulValue2ListLocal[i]);
            }
        }
        fclose(profileFile);
    }
    free(mulValue1ListLocal);
    free(mulValue2ListLocal);
    free(mulCountListLocal);
}
|
6,266 | #include <cuda_runtime.h>
#include <cuda.h>
__device__ int ptr=0;
// Single-thread demo kernel: builds a local running-count array seeded from
// the device global `ptr`, then publishes b[7]+1 back to `ptr`.
__global__ void a()
{
    // BUG FIX: the unrolled loop below writes indices 1..199, but the array
    // was declared b[100], overflowing the local buffer. Size it to match
    // the loop bound.
    int b[200];
    //atomicAdd(&ptr,1);
    b[0] = ptr;
#pragma unroll
    for(int i = 1; i < 200; i++)
    {
        b[i] = b[i-1] + 1;
    }
    ptr = b[7] + 1;
}
// Launches the single-thread demo kernel; no synchronization or copy-back,
// so the device-side result is never observed by the host.
int main()
{
a<<<1,1>>>();
return 0;
}
|
6,267 | #include <stdio.h>
#include <cuda_runtime.h>
// Device helper: returns the sum of a and b.
__device__ float fx(float a, float b) {
return a + b;
}
// Prints fx(1.0, 2.0) from device code (device printf, debugging only).
__global__ void kernel(void) {
printf("res = %f\n", fx(1.0, 2.0));
}
// Launches the kernel once and waits for the device printf to flush.
int main(int argc, char* argv[]) {
kernel <<<1,1>>>();
cudaDeviceSynchronize();
return 0;
}
|
6,268 | #include "includes.h"
// No-op kernel; doing nothing on the device (commonly launched once so CUDA
// context creation cost is paid up front).
__global__ void initKernel(){
return;
} |
6,269 | #include "includes.h"
// Accumulates a gray-level co-occurrence histogram for the 45-degree
// neighbor: for qualifying pixels, bins the pair (A[idx], A[idx-(nx-1)])
// as max*A[idx]+A[idx-(nx-1)] and atomically increments glcm at that bin.
// NOTE(review): the loop couples blockIdx.x to the row index i (only the
// block whose x-index equals the row participates), so correctness depends
// on a specific launch geometry -- confirm against the host-side launch.
// The trailing __syncthreads() has no effect at kernel end.
__global__ void glcm_calculation_45(int *A,int *glcm, const int nx, const int ny,int max){
int ix = threadIdx.x + blockIdx.x* blockDim.x;
int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =iy*nx+ix;
int i;
int k=0;
for(i=1;i<nx;i++){
if(blockIdx.x==i && idx <((i+1)*nx)-1){
k=max*A[idx]+A[idx-(nx-1)]; // pair bin: (current pixel, upper-right neighbor)
atomicAdd(&glcm[k],1);
}
}
__syncthreads();
} |
6,270 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <math.h>
#include <cuda.h>
#include <algorithm>
#define BLOCK_SIZE 1024
__device__ unsigned int counter, counter_2;
//__device__ unsigned int flag;
__constant__ const unsigned int INTMAX = 2147483647;
// struct for dictionary
struct huffmanDictionary{
unsigned char bitSequence[255];
unsigned char bitSequenceLength;
};
// struct for node
struct huffmanNode{
unsigned char letter; // char to store
unsigned int frequency; // frequency of the char
struct huffmanNode * left; // left sub tree
struct huffmanNode * right; // right sub tree
};
struct huffmanDictionary huffmanDictionary[256];
struct huffmanNode * huffmanNode_head;
struct huffmanNode huffmanTreeNode[512];
#define DEBUG 1
// Linear search: returns the first index in freq[0..size) whose value equals
// `search`, or -1 when no element matches.
__device__ int findIndex(unsigned int *freq, unsigned int size,unsigned int search){
    for(int i = 0; i < size; i++){
        if(freq[i] == search)
            return i;
    }
    return -1;
}
// Huffman frequency pairing loop: repeatedly extracts the two smallest
// entries of freq[] (via atomicMin races into min[]), records their indices,
// replaces one with their sum and marks the other consumed (INTMAX),
// advancing counter_2 two steps per iteration until no finite pair remains.
// NOTE(review): the in-code "Need global barrier" comments are accurate --
// __syncthreads() only synchronizes within one block, and counter_2 is a
// device global written unguarded by every thread at entry. This kernel is
// only correct when launched as a single block; confirm the launch
// configuration before reuse.
__global__ void findLeastFrequent(unsigned int *freq, unsigned int *min, int size, unsigned int threads, unsigned int* count, unsigned int *index){
int id = blockIdx.x*blockDim.x + threadIdx.x;
counter_2 = 0;
__syncthreads();
int ind;
if(id<threads){
while(1){
// find the current minimum frequency
min[counter_2] = INTMAX;
atomicMin(&min[counter_2], freq[id]);
// Need global barrier
__syncthreads();
ind = findIndex(freq, threads, min[counter_2]);
index[counter_2] = ind;
// Need global barrier
__syncthreads();
freq[ind] = INTMAX;                      // consume the first minimum
if(id == 0) atomicInc(&counter_2, size);
// Need global barrier
__syncthreads();
// find the second minimum
min[counter_2] = INTMAX;
atomicMin(&min[counter_2], freq[id]);
// Need global barrier
__syncthreads();
ind = findIndex(freq, threads, min[counter_2]);
index[counter_2] = ind;
// Need global barrier
__syncthreads();
// replace the second minimum with the pair's combined frequency
freq[ind] = min[counter_2] + min[counter_2-1];
if(id == 0) atomicInc(&counter_2, size);
// Need global barrier
__syncthreads();
// stop when no finite pair was found this round
if(min[counter_2] == INTMAX || min[counter_2-1] == INTMAX){
count[0] = counter_2;
break;
}
}
}
}
/* Collects every position id in [0, threads) whose recorded node index
   matches index[threads]; matching ids go into resultIndex[] and the match
   count into cnt[0].
   Fixes over the original:
   - `__syncthreads()` was inside the divergent `if(id != threads)` branch,
     which is undefined behavior (not all threads reach the barrier).
   - `counter = 0` was written by EVERY thread with no barrier before the
     atomicInc, so a late reset could clobber an earlier increment. Now a
     single thread resets, followed by a barrier reached by all threads.
   NOTE(review): the barrier is block-scoped; callers in this file launch at
   most BLOCK_SIZE threads per search, i.e. a single block — confirm if the
   launch configuration ever changes. */
__global__ void searchSimilarIndex(unsigned int *index, unsigned int *resultIndex, unsigned int *cnt, int threads){
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    if(id == 0) counter = 0;          // reset exactly once
    __syncthreads();                  // barrier reached by ALL threads
    if(id != threads && index[id] == index[threads]){
        int temp = atomicInc(&counter, threads+1);
        resultIndex[temp] = id;
    }
    __syncthreads();                  // all increments visible before publishing
    if(id == 0) cnt[0] = counter;     // publish the final count exactly once
}
/* Writes one node of the Huffman tree into the flat node pool.
   count == 0  => leaf: copy letter/frequency from the input arrays at childIndex.
   count != 0  => internal node: merge the two adjacent pool entries at
   childIndex and childIndex+1, and remember this node as the current root. */
void buildHuffmanTree(int count,unsigned char *uniqueChar, unsigned int *frequency,int newIndex, int childIndex){
    struct huffmanNode *node = &huffmanTreeNode[newIndex];
    if(count == 0){
        node->frequency = frequency[childIndex];
        node->letter    = uniqueChar[childIndex];
        node->left      = NULL;
        node->right     = NULL;
    }
    else{
        node->frequency = huffmanTreeNode[childIndex].frequency + huffmanTreeNode[childIndex + 1].frequency;
        node->left      = &huffmanTreeNode[childIndex];
        node->right     = &huffmanTreeNode[childIndex + 1];
        huffmanNode_head = node;   // newest internal node becomes the root
    }
}
/* Depth-first traversal of the Huffman tree: appends 0 for a left edge and
   1 for a right edge into bitSequence, and on reaching a leaf copies the
   accumulated code (and its length) into the global dictionary slot for
   that leaf's character. */
void buildHuffmanDictionary(struct huffmanNode * root, unsigned char * bitSequence, unsigned char bitSequenceLength){
    struct huffmanNode *l = root->left;
    struct huffmanNode *r = root->right;
    if(l != NULL){
        bitSequence[bitSequenceLength] = 0;
        buildHuffmanDictionary(l, bitSequence, bitSequenceLength + 1);
    }
    if(r != NULL){
        bitSequence[bitSequenceLength] = 1;
        buildHuffmanDictionary(r, bitSequence, bitSequenceLength + 1);
    }
    if(l == NULL && r == NULL){   // leaf: record this character's code
        huffmanDictionary[root->letter].bitSequenceLength = bitSequenceLength;
        memcpy(huffmanDictionary[root->letter].bitSequence, bitSequence, bitSequenceLength * sizeof(unsigned char));
    }
}
/* Huffman decompressor driver.
   Input file layout: decompressed length (4B), number of padding bits (4B),
   256 frequencies (1024B) — a 1032-byte header — followed by the payload
   (one byte per encoded bit).
   Fixes over the original: argc/fopen checks, binary open mode ("rb"; plain
   "r" corrupts binary data on Windows), fclose of the input stream, and
   release of every host/device allocation (all were leaked). */
int main(int argc, char ** argv){
    clock_t start, end;
    unsigned int cpuTime;
    unsigned int compressedFileLength, outputFileLengthCounter, outputFileLength, extra;
    unsigned int distinctCharacterCount;
    unsigned int frequency[256];
    unsigned char bitSequenceLength = 0, bitSequence[255];
    unsigned char * compressedData, * outputData;
    struct huffmanNode * huffmanNode_current;
    FILE *compressedFile, *outputFile;
    if(argc < 3){
        fprintf(stderr, "Usage: %s <compressed file> <output file>\n", argv[0]);
        return 1;
    }
    // Read the header: output length, padding-bit count, frequency table.
    compressedFile = fopen(argv[1], "rb");
    if(compressedFile == NULL){
        fprintf(stderr, "Unable to open input file %s\n", argv[1]);
        return 1;
    }
    fread(& outputFileLength, sizeof(unsigned int), 1, compressedFile);
    fread(& extra, sizeof(unsigned int), 1, compressedFile);
    fread(frequency, 256 * sizeof(unsigned int), 1, compressedFile);
    // Payload length = file size minus the 1032-byte header.
    fseek(compressedFile, 0, SEEK_END);
    compressedFileLength = ftell(compressedFile) - 1032;
    fseek(compressedFile, 1032, SEEK_SET);
    compressedData = (unsigned char *)malloc(compressedFileLength * sizeof(unsigned char));
    fread(compressedData, sizeof(unsigned char), compressedFileLength, compressedFile);
    fclose(compressedFile);   // BUG FIX: input stream was never closed
    // start the clock
    start = clock();
    // Count distinct characters present in the original data.
    distinctCharacterCount = 0;
    for(int i = 0; i < 256; i++){
        if(frequency[i] > 0){
            distinctCharacterCount ++;
        }
    }
    // Collect the characters that actually occur, in ascending byte order.
    int unique = 0;
    unsigned char *uniqueChar, *duniqueChar;
    uniqueChar = (unsigned char *)malloc(256*sizeof(unsigned char));
    cudaMalloc(&duniqueChar, 256*sizeof(unsigned char));
    for(int i = 0; i < 256; i++){
        if(frequency[i] > 0){
            uniqueChar[unique++] = i;
        }
    }
    cudaMemcpy(duniqueChar, uniqueChar, 256*sizeof(unsigned char), cudaMemcpyHostToDevice);
    // *** Find the successive minimum frequency pairs on the GPU; they
    // define the order in which Huffman nodes are merged. ***
    unsigned int *tempFreq, *tempDFreq;
    unsigned int *min, *dmin;
    unsigned int *cntMin, *dcntMin;
    unsigned int *indMin, *dindMin;
    int ctr = 0;
    tempFreq = (unsigned int *)malloc(unique*sizeof(unsigned int));
    min = (unsigned int *)malloc(outputFileLength*sizeof(unsigned int));
    cntMin = (unsigned int *)malloc(sizeof(unsigned int));
    indMin = (unsigned int *)malloc(outputFileLength*sizeof(unsigned int));
    for(unsigned int i = 0; i < 256; i++){
        if(frequency[i] != 0){
            tempFreq[ctr++] = frequency[i];
        }
    }
    cudaMalloc(&tempDFreq, unique*sizeof(unsigned int));
    cudaMalloc(&dmin, outputFileLength*sizeof(unsigned int));
    cudaMalloc(&dindMin, outputFileLength*sizeof(unsigned int));
    cudaMalloc(&dcntMin, sizeof(unsigned int));
    cudaMemcpy(tempDFreq, tempFreq, unique*sizeof(unsigned int), cudaMemcpyHostToDevice);
    float num = (float)(unique)/(float)BLOCK_SIZE;
    int mod = BLOCK_SIZE;
    if(unique < BLOCK_SIZE) mod = unique%BLOCK_SIZE;
    int n = ceil(num);
    findLeastFrequent<<<n, mod>>>(tempDFreq, dmin, outputFileLength, unique, dcntMin, dindMin);
    cudaDeviceSynchronize();
    cudaMemcpy(min, dmin, outputFileLength*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaMemcpy(indMin, dindMin, outputFileLength*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaMemcpy(cntMin, dcntMin, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    // Rebuild the tree: for each recorded minimum, locate earlier entries
    // that reference the same source slot (its children) and link them.
    unsigned int *resultIndex, *dresultIndex;
    unsigned int *cnt, *dcnt;
    resultIndex = (unsigned int *)malloc(cntMin[0]*sizeof(unsigned int));
    cudaMalloc(&dresultIndex, cntMin[0]*sizeof(unsigned int));
    cnt = (unsigned int *)malloc(sizeof(unsigned int));
    cudaMalloc(&dcnt, sizeof(unsigned int));
    int indexChild;
    for(int i = 0; i < cntMin[0]-1; i++){
        num = (float)(i+1)/(float)BLOCK_SIZE;
        mod = BLOCK_SIZE;
        if(i+1 < BLOCK_SIZE) mod = (i+1)%BLOCK_SIZE;
        n = ceil(num);
        searchSimilarIndex<<<n, mod>>>(dindMin, dresultIndex, dcnt, i);
        cudaDeviceSynchronize();
        cudaMemcpy(resultIndex, dresultIndex, cntMin[0]*sizeof(unsigned int), cudaMemcpyDeviceToHost);
        cudaMemcpy(cnt, dcnt, sizeof(unsigned int), cudaMemcpyDeviceToHost);
        if(cnt[0] == 0) indexChild = indMin[i];               // leaf
        else indexChild = *std::max_element(resultIndex, resultIndex + cnt[0])-1; // internal
        buildHuffmanTree(cnt[0], uniqueChar, tempFreq, i, indexChild);
    }
    // Derive every character's bit sequence from the finished tree.
    buildHuffmanDictionary(huffmanNode_head, bitSequence, bitSequenceLength);
    // Decode: walk the tree one payload bit at a time; on reaching a leaf,
    // emit its character and restart from the root. `extra` padding bits at
    // the tail are skipped.
    outputData = (unsigned char *)malloc(outputFileLength * sizeof(unsigned char));
    huffmanNode_current = huffmanNode_head;
    outputFileLengthCounter = 0;
    for(int i = 0; i < compressedFileLength-extra; i++){
        huffmanNode_current = (compressedData[i] == 0) ? huffmanNode_current->left
                                                       : huffmanNode_current->right;
        if(huffmanNode_current->left == NULL && huffmanNode_current->right == NULL){
            outputData[outputFileLengthCounter] = huffmanNode_current->letter;
            huffmanNode_current = huffmanNode_head;
            outputFileLengthCounter ++;
        }
    }
    // stop the clock
    end = clock();
    // Write the decoded data.
    outputFile = fopen(argv[2], "wb");
    if(outputFile == NULL){
        fprintf(stderr, "Unable to open output file %s\n", argv[2]);
    }
    else{
        fwrite(outputData, sizeof(unsigned char), outputFileLength, outputFile);
        fclose(outputFile);
    }
    if(DEBUG){
        printf("\nCompressed file length :: %d", compressedFileLength/8);
        printf("\nOutput file length :: %d", outputFileLength);
    }
    cpuTime = (end - start) * 1000 / CLOCKS_PER_SEC;
    printf("\nTime taken: %d:%d s\n", cpuTime / 1000, cpuTime % 1000);
    // Release every host and device allocation (most were leaked before).
    free(uniqueChar);
    free(tempFreq);
    free(min);
    free(cntMin);
    free(indMin);
    free(resultIndex);
    free(cnt);
    free(outputData);
    free(compressedData);
    cudaFree(duniqueChar);
    cudaFree(tempDFreq);
    cudaFree(dmin);
    cudaFree(dindMin);
    cudaFree(dcntMin);
    cudaFree(dresultIndex);
    cudaFree(dcnt);
    return 0;
}
|
6,271 | #include <stdio.h>
/* ************************************************** FIRST LAYER START ********************************************************* */
/*
Layer 1: Normal 3D Convolution Layer
Input: 225 * 225 * 3 (Padding of 1)
Weight: 3 * 3 * 3 with a Stride of 2
Output: 112 * 112 * 32
Next Layer is a padding layer, so padding operation is handled in this layer itself & hence
Final Output = 114 * 114 * 32
*/
/* 3x3x3 convolution (stride 2) over the padded 225x225x3 input.
   blockIdx.x selects the output filter; blockIdx.y/z tile the first 96x96
   region of the output in 32x32 patches. Results land inside a zero-padded
   114x114 plane, hence the +115 offset (one padded row + one column). */
__global__ void executeFirstLayer_CONV3D_partA(double *Layer1_Neurons_GPU,
                            double *Layer1_Weights_GPU,
                            double *Layer2_Neurons_GPU,
                            double *Layer1_Mean_GPU,
                            double *Layer1_StanDev_GPU,
                            double *Layer1_Gamma_GPU,
                            double *Layer1_Beta_GPU
                        )
{
    const int fn = blockIdx.x;          // output filter / channel
    const int stride = 2;
    const int pad_offset = 115;         // one padded row (114) + one column
    const int out_pos = (fn * 114 * 114)
                      + (blockIdx.y * 32 * 114)
                      + (blockIdx.z * 32)
                      + (threadIdx.x * 114)
                      + threadIdx.y;
    const int w_base = fn * 27;         // 3*3*3 weights per filter
    const int in_pos = ((blockIdx.y * 32 * 225) * stride)
                     + (blockIdx.z * 32 * stride)
                     + (threadIdx.x * 225 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int c = 0; c < 3; c++)          // RGB input channels
    {
        const int in_base = (c * 225 * 225) + in_pos;
        const int w_row   = w_base + (c * 3 * 3);
        for(int r = 0; r < 3; r++)      // kernel rows
        {
            acc += ((Layer1_Neurons_GPU[in_base + (r * 225)]     * Layer1_Weights_GPU[w_row + (r * 3)])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 1] * Layer1_Weights_GPU[w_row + (r * 3) + 1])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 2] * Layer1_Weights_GPU[w_row + (r * 3) + 2]));
        }
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer1_Mean_GPU[fn]) / Layer1_StanDev_GPU[fn];
    Z = (Z * Layer1_Gamma_GPU[fn]) + Layer1_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer2_Neurons_GPU[out_pos + pad_offset] = Z;
}
/* Same convolution as partA, covering the right-edge strip (columns 96+)
   of the output in 16x16 patches stacked along blockIdx.y. */
__global__ void executeFirstLayer_CONV3D_partB(double *Layer1_Neurons_GPU,
                            double *Layer1_Weights_GPU,
                            double *Layer2_Neurons_GPU,
                            double *Layer1_Mean_GPU,
                            double *Layer1_StanDev_GPU,
                            double *Layer1_Gamma_GPU,
                            double *Layer1_Beta_GPU
                        )
{
    const int fn = blockIdx.x;          // output filter / channel
    const int stride = 2;
    const int pad_offset = 115;         // one padded row (114) + one column
    const int out_pos = (fn * 114 * 114)
                      + (blockIdx.y * 16 * 114 + 96)   // row block, column offset 96
                      + (threadIdx.x * 114)
                      + threadIdx.y;
    const int w_base = fn * 27;         // 3*3*3 weights per filter
    const int in_pos = ((blockIdx.y * 16 * 225) * stride) + (96 * stride)
                     + (threadIdx.x * 225 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int c = 0; c < 3; c++)          // RGB input channels
    {
        const int in_base = (c * 225 * 225) + in_pos;
        const int w_row   = w_base + (c * 3 * 3);
        for(int r = 0; r < 3; r++)      // kernel rows
        {
            acc += ((Layer1_Neurons_GPU[in_base + (r * 225)]     * Layer1_Weights_GPU[w_row + (r * 3)])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 1] * Layer1_Weights_GPU[w_row + (r * 3) + 1])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 2] * Layer1_Weights_GPU[w_row + (r * 3) + 2]));
        }
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer1_Mean_GPU[fn]) / Layer1_StanDev_GPU[fn];
    Z = (Z * Layer1_Gamma_GPU[fn]) + Layer1_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer2_Neurons_GPU[out_pos + pad_offset] = Z;
}
/* Same convolution as partA, covering the bottom-edge strip (rows 96+) of
   the output in 16x16 patches laid out along blockIdx.y. */
__global__ void executeFirstLayer_CONV3D_partC(double *Layer1_Neurons_GPU,
                            double *Layer1_Weights_GPU,
                            double *Layer2_Neurons_GPU,
                            double *Layer1_Mean_GPU,
                            double *Layer1_StanDev_GPU,
                            double *Layer1_Gamma_GPU,
                            double *Layer1_Beta_GPU
                        )
{
    const int fn = blockIdx.x;          // output filter / channel
    const int stride = 2;
    const int pad_offset = 115;         // one padded row (114) + one column
    const int out_pos = (fn * 114 * 114)
                      + (96 * 114)                    // row offset 96 (last strip)
                      + (blockIdx.y * 16)             // column block
                      + (threadIdx.x * 114)
                      + threadIdx.y;
    const int w_base = fn * 27;         // 3*3*3 weights per filter
    const int in_pos = ((96 * 225) * stride)
                     + (blockIdx.y * 16 * stride)
                     + (threadIdx.x * 225 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int c = 0; c < 3; c++)          // RGB input channels
    {
        const int in_base = (c * 225 * 225) + in_pos;
        const int w_row   = w_base + (c * 3 * 3);
        for(int r = 0; r < 3; r++)      // kernel rows
        {
            acc += ((Layer1_Neurons_GPU[in_base + (r * 225)]     * Layer1_Weights_GPU[w_row + (r * 3)])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 1] * Layer1_Weights_GPU[w_row + (r * 3) + 1])
                  + (Layer1_Neurons_GPU[in_base + (r * 225) + 2] * Layer1_Weights_GPU[w_row + (r * 3) + 2]));
        }
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer1_Mean_GPU[fn]) / Layer1_StanDev_GPU[fn];
    Z = (Z * Layer1_Gamma_GPU[fn]) + Layer1_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer2_Neurons_GPU[out_pos + pad_offset] = Z;
}
/* ************************************************** FIRST LAYER END ************************************************************ */
/* ************************************************** SECOND LAYER START ********************************************************* */
/*
Layer 2: Depthwise Separable Convolution Layer
Input: 114 * 114 * 3 (After padding)
Weight: 3 * 3 * 32 with a Stride of 1
Output: 112 * 112 * 32
*/
/* 3x3 depthwise convolution (stride 1): each filter reads only its own input
   channel. 32x32 output patches over the first 96x96 region. */
__global__ void executeSecondLayer_DSC_partA(double *Layer2_Neurons_GPU,
                        double *Layer2_Weights_GPU,
                        double *Layer3_Neurons_GPU,
                        double *Layer2_Mean_GPU,
                        double *Layer2_StanDev_GPU,
                        double *Layer2_Gamma_GPU,
                        double *Layer2_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int out_pos = (fn * 112 * 112)
                      + (blockIdx.y * 32 * 112)
                      + (blockIdx.z * 32)
                      + (threadIdx.x * 112)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 114 * 114)
                     + (blockIdx.y * 32 * 114)
                     + (blockIdx.z * 32)
                     + (threadIdx.x * 114)
                     + threadIdx.y;
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer2_Neurons_GPU[in_pos + (r * 114)]     * Layer2_Weights_GPU[w_base + (r * 3)])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 1] * Layer2_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 2] * Layer2_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer2_Mean_GPU[fn]) / Layer2_StanDev_GPU[fn];
    Z = (Z * Layer2_Gamma_GPU[fn]) + Layer2_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer3_Neurons_GPU[out_pos] = Z;
}
/* Same depthwise convolution as partA, covering the right-edge strip
   (columns 96+) in 16x16 patches. */
__global__ void executeSecondLayer_DSC_partB(double *Layer2_Neurons_GPU,
                        double *Layer2_Weights_GPU,
                        double *Layer3_Neurons_GPU,
                        double *Layer2_Mean_GPU,
                        double *Layer2_StanDev_GPU,
                        double *Layer2_Gamma_GPU,
                        double *Layer2_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int out_pos = (fn * 112 * 112)
                      + (blockIdx.y * 16 * 112 + 96)  // row block, column offset 96
                      + (threadIdx.x * 112)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 114 * 114)
                     + (blockIdx.y * 16 * 114)
                     + 96
                     + (threadIdx.x * 114)
                     + threadIdx.y;
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer2_Neurons_GPU[in_pos + (r * 114)]     * Layer2_Weights_GPU[w_base + (r * 3)])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 1] * Layer2_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 2] * Layer2_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer2_Mean_GPU[fn]) / Layer2_StanDev_GPU[fn];
    Z = (Z * Layer2_Gamma_GPU[fn]) + Layer2_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer3_Neurons_GPU[out_pos] = Z;
}
/* Same depthwise convolution as partA, covering the bottom-edge strip
   (rows 96+) in 16x16 patches. */
__global__ void executeSecondLayer_DSC_partC(double *Layer2_Neurons_GPU,
                        double *Layer2_Weights_GPU,
                        double *Layer3_Neurons_GPU,
                        double *Layer2_Mean_GPU,
                        double *Layer2_StanDev_GPU,
                        double *Layer2_Gamma_GPU,
                        double *Layer2_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int out_pos = (fn * 112 * 112)
                      + (96 * 112)                    // row offset 96 (last strip)
                      + (blockIdx.y * 16)             // column block
                      + (threadIdx.x * 112)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 114 * 114)
                     + (96 * 114)
                     + (blockIdx.y * 16)
                     + (threadIdx.x * 114)
                     + threadIdx.y;
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer2_Neurons_GPU[in_pos + (r * 114)]     * Layer2_Weights_GPU[w_base + (r * 3)])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 1] * Layer2_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer2_Neurons_GPU[in_pos + (r * 114) + 2] * Layer2_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer2_Mean_GPU[fn]) / Layer2_StanDev_GPU[fn];
    Z = (Z * Layer2_Gamma_GPU[fn]) + Layer2_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer3_Neurons_GPU[out_pos] = Z;
}
/* ************************************************** SECOND LAYER END ********************************************************* */
/* ************************************************** THIRD LAYER START ******************************************************** */
/*
Layer 3: Pointwise Separable Convolution Layer
Input: 112 * 112 * 32 (After padding)
Weight: 1 * 1 * 32 * 64 with a Stride of 1
Output: 113 * 113 * 64 (Padding of 1 is handled in this layer execution itself)
*/
/* 1x1 pointwise convolution over 32 input channels. 32x32 output patches
   over the first 96x96 region; output planes are 113x113. */
__global__ void executeThirdLayer_PSC_partA(double *Layer3_Neurons_GPU,
                        double *Layer3_Weights_GPU,
                        double *Layer4_Neurons_GPU,
                        double *Layer3_Mean_GPU,
                        double *Layer3_StanDev_GPU,
                        double *Layer3_Gamma_GPU,
                        double *Layer3_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // output filter
    const int out_pos = (fn * 113 * 113)
                      + (blockIdx.y * 32 * 113)
                      + (blockIdx.z * 32)
                      + (threadIdx.x * 113)
                      + threadIdx.y;
    const int w_base = fn * 32;         // one weight per input channel
    const int in_pos = (blockIdx.y * 32 * 112)
                     + (blockIdx.z * 32)
                     + (threadIdx.x * 112)
                     + threadIdx.y;
    double acc = 0.0;
    for(int c = 0; c < 32; c++)         // input channels
    {
        acc += Layer3_Neurons_GPU[(c * 112 * 112) + in_pos] * Layer3_Weights_GPU[w_base + c];
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer3_Mean_GPU[fn]) / Layer3_StanDev_GPU[fn];
    Z = (Z * Layer3_Gamma_GPU[fn]) + Layer3_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer4_Neurons_GPU[out_pos] = Z;
}
/* Same pointwise convolution as partA, covering the right-edge strip
   (columns 96+) in 16x16 patches. */
__global__ void executeThirdLayer_PSC_partB(double *Layer3_Neurons_GPU,
                        double *Layer3_Weights_GPU,
                        double *Layer4_Neurons_GPU,
                        double *Layer3_Mean_GPU,
                        double *Layer3_StanDev_GPU,
                        double *Layer3_Gamma_GPU,
                        double *Layer3_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // output filter
    const int out_pos = (fn * 113 * 113)
                      + (blockIdx.y * 16 * 113 + 96)  // row block, column offset 96
                      + (threadIdx.x * 113)
                      + threadIdx.y;
    const int w_base = fn * 32;         // one weight per input channel
    const int in_pos = (blockIdx.y * 16 * 112)
                     + 96
                     + (threadIdx.x * 112)
                     + threadIdx.y;
    double acc = 0.0;
    for(int c = 0; c < 32; c++)         // input channels
    {
        acc += Layer3_Neurons_GPU[(c * 112 * 112) + in_pos] * Layer3_Weights_GPU[w_base + c];
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer3_Mean_GPU[fn]) / Layer3_StanDev_GPU[fn];
    Z = (Z * Layer3_Gamma_GPU[fn]) + Layer3_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer4_Neurons_GPU[out_pos] = Z;
}
/* Same pointwise convolution as partA, covering the bottom-edge strip
   (rows 96+) in 16x16 patches. */
__global__ void executeThirdLayer_PSC_partC(double *Layer3_Neurons_GPU,
                        double *Layer3_Weights_GPU,
                        double *Layer4_Neurons_GPU,
                        double *Layer3_Mean_GPU,
                        double *Layer3_StanDev_GPU,
                        double *Layer3_Gamma_GPU,
                        double *Layer3_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // output filter
    const int out_pos = (fn * 113 * 113)
                      + (96 * 113)                    // row offset 96 (last strip)
                      + (blockIdx.y * 16)             // column block
                      + (threadIdx.x * 113)
                      + threadIdx.y;
    const int w_base = fn * 32;         // one weight per input channel
    const int in_pos = (96 * 112)
                     + (blockIdx.y * 16)
                     + (threadIdx.x * 112)
                     + threadIdx.y;
    double acc = 0.0;
    for(int c = 0; c < 32; c++)         // input channels
    {
        acc += Layer3_Neurons_GPU[(c * 112 * 112) + in_pos] * Layer3_Weights_GPU[w_base + c];
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer3_Mean_GPU[fn]) / Layer3_StanDev_GPU[fn];
    Z = (Z * Layer3_Gamma_GPU[fn]) + Layer3_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer4_Neurons_GPU[out_pos] = Z;
}
/* ************************************************** THIRD LAYER END ********************************************************* */
/* ************************************************** FOURTH LAYER START ****************************************************** */
/*
Layer 4: Depthwise Separable Convolution Layer
Input: 113 * 113 * 64
Weight: 3 * 3 * 64 with a Stride of 2
Output: 56 * 56 * 64
*/
/* 3x3 depthwise convolution with stride 2 over 113x113 planes; this part
   covers the top-left 32x32 quadrant of the 56x56 output. */
__global__ void executeFourthLayer_DSC_partA(double *Layer4_Neurons_GPU,
                        double *Layer4_Weights_GPU,
                        double *Layer5_Neurons_GPU,
                        double *Layer4_Mean_GPU,
                        double *Layer4_StanDev_GPU,
                        double *Layer4_Gamma_GPU,
                        double *Layer4_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int stride = 2;
    const int out_pos = (fn * 56 * 56)
                      + (threadIdx.x * 56)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 113 * 113)
                     + (threadIdx.x * 113 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer4_Neurons_GPU[in_pos + (r * 113)]     * Layer4_Weights_GPU[w_base + (r * 3)])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 1] * Layer4_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 2] * Layer4_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer4_Mean_GPU[fn]) / Layer4_StanDev_GPU[fn];
    Z = (Z * Layer4_Gamma_GPU[fn]) + Layer4_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer5_Neurons_GPU[out_pos] = Z;
}
/* Same stride-2 depthwise convolution as partA; covers the top-right
   quadrant (output columns 32+). */
__global__ void executeFourthLayer_DSC_partB(double *Layer4_Neurons_GPU,
                        double *Layer4_Weights_GPU,
                        double *Layer5_Neurons_GPU,
                        double *Layer4_Mean_GPU,
                        double *Layer4_StanDev_GPU,
                        double *Layer4_Gamma_GPU,
                        double *Layer4_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int stride = 2;
    const int out_pos = (fn * 56 * 56)
                      + (threadIdx.x * 56)
                      + (threadIdx.y + 32);           // column offset 32
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 113 * 113)
                     + (threadIdx.x * 113 * stride)
                     + (threadIdx.y * stride)
                     + (32 * stride);
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer4_Neurons_GPU[in_pos + (r * 113)]     * Layer4_Weights_GPU[w_base + (r * 3)])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 1] * Layer4_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 2] * Layer4_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer4_Mean_GPU[fn]) / Layer4_StanDev_GPU[fn];
    Z = (Z * Layer4_Gamma_GPU[fn]) + Layer4_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer5_Neurons_GPU[out_pos] = Z;
}
/* Same stride-2 depthwise convolution as partA; covers the bottom-left
   quadrant (output rows 32+). */
__global__ void executeFourthLayer_DSC_partC(double *Layer4_Neurons_GPU,
                        double *Layer4_Weights_GPU,
                        double *Layer5_Neurons_GPU,
                        double *Layer4_Mean_GPU,
                        double *Layer4_StanDev_GPU,
                        double *Layer4_Gamma_GPU,
                        double *Layer4_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int stride = 2;
    const int out_pos = (fn * 56 * 56)
                      + (56 * 32)                     // row offset 32
                      + (threadIdx.x * 56)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 113 * 113)
                     + (113 * 32 * stride)
                     + (threadIdx.x * 113 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer4_Neurons_GPU[in_pos + (r * 113)]     * Layer4_Weights_GPU[w_base + (r * 3)])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 1] * Layer4_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 2] * Layer4_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer4_Mean_GPU[fn]) / Layer4_StanDev_GPU[fn];
    Z = (Z * Layer4_Gamma_GPU[fn]) + Layer4_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer5_Neurons_GPU[out_pos] = Z;
}
/* Same stride-2 depthwise convolution as partA; covers the bottom-right
   quadrant (output rows 32+ and columns 32+). */
__global__ void executeFourthLayer_DSC_partD(double *Layer4_Neurons_GPU,
                        double *Layer4_Weights_GPU,
                        double *Layer5_Neurons_GPU,
                        double *Layer4_Mean_GPU,
                        double *Layer4_StanDev_GPU,
                        double *Layer4_Gamma_GPU,
                        double *Layer4_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // depthwise: filter index == channel index
    const int stride = 2;
    const int out_pos = (fn * 56 * 56)
                      + (56 * 32)                     // row offset 32
                      + 32                            // column offset 32
                      + (threadIdx.x * 56)
                      + threadIdx.y;
    const int w_base = fn * 9;          // one 3x3 kernel per channel
    const int in_pos = (fn * 113 * 113)
                     + (113 * 32 * stride)
                     + (32 * stride)
                     + (threadIdx.x * 113 * stride)
                     + (threadIdx.y * stride);
    double acc = 0.0;
    for(int r = 0; r < 3; r++)          // kernel rows
    {
        acc += ((Layer4_Neurons_GPU[in_pos + (r * 113)]     * Layer4_Weights_GPU[w_base + (r * 3)])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 1] * Layer4_Weights_GPU[w_base + (r * 3) + 1])
              + (Layer4_Neurons_GPU[in_pos + (r * 113) + 2] * Layer4_Weights_GPU[w_base + (r * 3) + 2]));
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer4_Mean_GPU[fn]) / Layer4_StanDev_GPU[fn];
    Z = (Z * Layer4_Gamma_GPU[fn]) + Layer4_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer5_Neurons_GPU[out_pos] = Z;
}
/* ************************************************** FOURTH LAYER END ****************************************************** */
/* *************************************************** FIFTH LAYER START **************************************************** */
/*
Layer 5: Pointwise Separable Convolution Layer
Input: 56 * 56 * 64
Weight: 1 * 1 * 64 * 128 with a Stride of 1
Output: 58 * 58 * 128 (Padding for the next layer is handled here itself)
*/
/* 1x1 pointwise convolution over 64 input channels (56x56 planes). Output
   lands in padded 58x58 planes, hence the +59 offset (one padded row + one
   column). This part covers the top-left 32x32 quadrant. */
__global__ void executeFifthLayer_PSC_partA(double *Layer5_Neurons_GPU,
                        double *Layer5_Weights_GPU,
                        double *Layer6_Neurons_GPU,
                        double *Layer5_Mean_GPU,
                        double *Layer5_StanDev_GPU,
                        double *Layer5_Gamma_GPU,
                        double *Layer5_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // output filter
    const int pad_offset = 59;          // one padded row (58) + one column
    const int out_pos = (fn * 58 * 58)
                      + (threadIdx.x * 58)
                      + threadIdx.y;
    const int w_base = fn * 64;         // one weight per input channel
    const int in_pos = (threadIdx.x * 56)
                     + threadIdx.y;
    double acc = 0.0;
    for(int c = 0; c < 64; c++)         // input channels
    {
        acc += Layer5_Neurons_GPU[(c * 56 * 56) + in_pos] * Layer5_Weights_GPU[w_base + c];
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer5_Mean_GPU[fn]) / Layer5_StanDev_GPU[fn];
    Z = (Z * Layer5_Gamma_GPU[fn]) + Layer5_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer6_Neurons_GPU[out_pos + pad_offset] = Z;
}
/* Same pointwise convolution as partA; covers the top-right quadrant
   (columns 32+). */
__global__ void executeFifthLayer_PSC_partB(double *Layer5_Neurons_GPU,
                        double *Layer5_Weights_GPU,
                        double *Layer6_Neurons_GPU,
                        double *Layer5_Mean_GPU,
                        double *Layer5_StanDev_GPU,
                        double *Layer5_Gamma_GPU,
                        double *Layer5_Beta_GPU
                    )
{
    const int fn = blockIdx.x;          // output filter
    const int pad_offset = 59;          // one padded row (58) + one column
    const int out_pos = (fn * 58 * 58)
                      + (threadIdx.x * 58)
                      + (threadIdx.y + 32);           // column offset 32
    const int w_base = fn * 64;         // one weight per input channel
    const int in_pos = (threadIdx.x * 56)
                     + threadIdx.y
                     + 32;
    double acc = 0.0;
    for(int c = 0; c < 64; c++)         // input channels
    {
        acc += Layer5_Neurons_GPU[(c * 56 * 56) + in_pos] * Layer5_Weights_GPU[w_base + c];
    }
    // Batch normalisation followed by ReLU6 clamping.
    double Z = (acc - Layer5_Mean_GPU[fn]) / Layer5_StanDev_GPU[fn];
    Z = (Z * Layer5_Gamma_GPU[fn]) + Layer5_Beta_GPU[fn];
    if(Z < 0)
        Z = 0;
    else if(Z > 6)
        Z = 6.0;
    Layer6_Neurons_GPU[out_pos + pad_offset] = Z;
}
__global__ void executeFifthLayer_PSC_partC(double *Layer5_Neurons_GPU,
                    double *Layer5_Weights_GPU,
                    double *Layer6_Neurons_GPU,
                    double *Layer5_Mean_GPU,
                    double *Layer5_StanDev_GPU,
                    double *Layer5_Gamma_GPU,
                    double *Layer5_Beta_GPU
                )
{
    // Layer 5 pointwise (1x1) convolution, part C: row index shifted down
    // by 32 (output skips 58*32 entries, input skips 56*32). Output goes
    // into the 58*58 padded map at offset 59.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 58 * 58 + 58 * 32 + threadIdx.x * 58 + threadIdx.y;
    const int wgt     = fltr * 64;
    const int in_idx  = 56 * 32 + threadIdx.x * 56 + threadIdx.y;
    // Dot product across all 64 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 64; ch++)
        acc += Layer5_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer5_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer5_Mean_GPU[fltr]) / Layer5_StanDev_GPU[fltr];
    Z = Z * Layer5_Gamma_GPU[fltr] + Layer5_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer6_Neurons_GPU[out_idx + 59] = Z;
}
__global__ void executeFifthLayer_PSC_partD(double *Layer5_Neurons_GPU,
                    double *Layer5_Weights_GPU,
                    double *Layer6_Neurons_GPU,
                    double *Layer5_Mean_GPU,
                    double *Layer5_StanDev_GPU,
                    double *Layer5_Gamma_GPU,
                    double *Layer5_Beta_GPU
                )
{
    // Layer 5 pointwise (1x1) convolution, part D: bottom-right tile,
    // both row and column indices shifted by 32. Output goes into the
    // 58*58 padded map at offset 59.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 58 * 58 + 58 * 32 + 32 + threadIdx.x * 58 + threadIdx.y;
    const int wgt     = fltr * 64;
    const int in_idx  = 56 * 32 + 32 + threadIdx.x * 56 + threadIdx.y;
    // Dot product across all 64 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 64; ch++)
        acc += Layer5_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer5_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer5_Mean_GPU[fltr]) / Layer5_StanDev_GPU[fltr];
    Z = Z * Layer5_Gamma_GPU[fltr] + Layer5_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer6_Neurons_GPU[out_idx + 59] = Z;
}
/* *************************************************** FIFTH LAYER END **************************************************** */
/* *************************************************** SIXTH LAYER START ************************************************** */
/*
Layer 6: Depthwise Separable Convolution Layer
Input: 58 * 58 * 128
Weight: 3 * 3 * 128 with a Stride of 1
Output: 56 * 56 * 128
*/
__global__ void executeSixthLayer_DSC_partA(double *Layer6_Neurons_GPU,
                    double *Layer6_Weights_GPU,
                    double *Layer7_Neurons_GPU,
                    double *Layer6_Mean_GPU,
                    double *Layer6_StanDev_GPU,
                    double *Layer6_Gamma_GPU,
                    double *Layer6_Beta_GPU
                )
{
    // Layer 6 depthwise 3x3 convolution, stride 1, on a 58*58 padded
    // input; each filter (blockIdx.x) convolves only its own channel.
    // Part A: top-left tile, coordinates directly from threadIdx.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 56 * 56 + threadIdx.x * 56 + threadIdx.y;
    const int wgt     = fltr * 9;                        // one 3x3 kernel per channel
    const int in_idx  = threadIdx.x * 58 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 58 * 58 + in_idx + r * 58;
        const int w_row  = wgt + r * 3;
        acc += Layer6_Neurons_GPU[in_row]     * Layer6_Weights_GPU[w_row]
             + Layer6_Neurons_GPU[in_row + 1] * Layer6_Weights_GPU[w_row + 1]
             + Layer6_Neurons_GPU[in_row + 2] * Layer6_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer6_Mean_GPU[fltr]) / Layer6_StanDev_GPU[fltr];
    Z = Z * Layer6_Gamma_GPU[fltr] + Layer6_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer7_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSixthLayer_DSC_partB(double *Layer6_Neurons_GPU,
                    double *Layer6_Weights_GPU,
                    double *Layer7_Neurons_GPU,
                    double *Layer6_Mean_GPU,
                    double *Layer6_StanDev_GPU,
                    double *Layer6_Gamma_GPU,
                    double *Layer6_Beta_GPU
                )
{
    // Layer 6 depthwise 3x3 convolution, part B: column index shifted
    // right by 32 relative to part A.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 56 * 56 + threadIdx.x * 56 + threadIdx.y + 32;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 58 + threadIdx.y + 32;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 58 * 58 + in_idx + r * 58;
        const int w_row  = wgt + r * 3;
        acc += Layer6_Neurons_GPU[in_row]     * Layer6_Weights_GPU[w_row]
             + Layer6_Neurons_GPU[in_row + 1] * Layer6_Weights_GPU[w_row + 1]
             + Layer6_Neurons_GPU[in_row + 2] * Layer6_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer6_Mean_GPU[fltr]) / Layer6_StanDev_GPU[fltr];
    Z = Z * Layer6_Gamma_GPU[fltr] + Layer6_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer7_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSixthLayer_DSC_partC(double *Layer6_Neurons_GPU,
                    double *Layer6_Weights_GPU,
                    double *Layer7_Neurons_GPU,
                    double *Layer6_Mean_GPU,
                    double *Layer6_StanDev_GPU,
                    double *Layer6_Gamma_GPU,
                    double *Layer6_Beta_GPU
                )
{
    // Layer 6 depthwise 3x3 convolution, part C: row index shifted down
    // by 32 (output skips 56*32 entries, input skips 58*32).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 56 * 56 + 56 * 32 + threadIdx.x * 56 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = 58 * 32 + threadIdx.x * 58 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 58 * 58 + in_idx + r * 58;
        const int w_row  = wgt + r * 3;
        acc += Layer6_Neurons_GPU[in_row]     * Layer6_Weights_GPU[w_row]
             + Layer6_Neurons_GPU[in_row + 1] * Layer6_Weights_GPU[w_row + 1]
             + Layer6_Neurons_GPU[in_row + 2] * Layer6_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer6_Mean_GPU[fltr]) / Layer6_StanDev_GPU[fltr];
    Z = Z * Layer6_Gamma_GPU[fltr] + Layer6_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer7_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSixthLayer_DSC_partD(double *Layer6_Neurons_GPU,
                    double *Layer6_Weights_GPU,
                    double *Layer7_Neurons_GPU,
                    double *Layer6_Mean_GPU,
                    double *Layer6_StanDev_GPU,
                    double *Layer6_Gamma_GPU,
                    double *Layer6_Beta_GPU
                )
{
    // Layer 6 depthwise 3x3 convolution, part D: bottom-right tile,
    // both row and column indices shifted by 32.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 56 * 56 + 56 * 32 + 32 + threadIdx.x * 56 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = 58 * 32 + 32 + threadIdx.x * 58 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 58 * 58 + in_idx + r * 58;
        const int w_row  = wgt + r * 3;
        acc += Layer6_Neurons_GPU[in_row]     * Layer6_Weights_GPU[w_row]
             + Layer6_Neurons_GPU[in_row + 1] * Layer6_Weights_GPU[w_row + 1]
             + Layer6_Neurons_GPU[in_row + 2] * Layer6_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer6_Mean_GPU[fltr]) / Layer6_StanDev_GPU[fltr];
    Z = Z * Layer6_Gamma_GPU[fltr] + Layer6_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer7_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** SIXTH LAYER END **************************************************** */
/* *************************************************** SEVENTH LAYER START ************************************************ */
/*
Layer 7: Pointwise Separable Convolution Layer
Input: 56 * 56 * 128
Weight: 1 * 1 * 128 * 128 with a Stride of 1
Output: 57 * 57 * 128 (Padding for the next layer is handled in this layer itself)
*/
__global__ void executeSeventhLayer_PSC_partA(double *Layer7_Neurons_GPU,
                    double *Layer7_Weights_GPU,
                    double *Layer8_Neurons_GPU,
                    double *Layer7_Mean_GPU,
                    double *Layer7_StanDev_GPU,
                    double *Layer7_Gamma_GPU,
                    double *Layer7_Beta_GPU
                )
{
    // Layer 7 pointwise (1x1) convolution over a 56*56*128 input.
    // Part A: top-left tile. Output map is 57*57 (one extra row/column on
    // the bottom/right for the stride-2 layer 8); stored with no offset.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 57 * 57 + threadIdx.x * 57 + threadIdx.y;
    const int wgt     = fltr * 128;                       // 128 weights per filter
    const int in_idx  = threadIdx.x * 56 + threadIdx.y;
    // Dot product across all 128 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 128; ch++)
        acc += Layer7_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer7_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer7_Mean_GPU[fltr]) / Layer7_StanDev_GPU[fltr];
    Z = Z * Layer7_Gamma_GPU[fltr] + Layer7_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer8_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSeventhLayer_PSC_partB(double *Layer7_Neurons_GPU,
                    double *Layer7_Weights_GPU,
                    double *Layer8_Neurons_GPU,
                    double *Layer7_Mean_GPU,
                    double *Layer7_StanDev_GPU,
                    double *Layer7_Gamma_GPU,
                    double *Layer7_Beta_GPU
                )
{
    // Layer 7 pointwise (1x1) convolution, part B: column index shifted
    // right by 32 relative to part A.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 57 * 57 + threadIdx.x * 57 + threadIdx.y + 32;
    const int wgt     = fltr * 128;
    const int in_idx  = threadIdx.x * 56 + threadIdx.y + 32;
    // Dot product across all 128 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 128; ch++)
        acc += Layer7_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer7_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer7_Mean_GPU[fltr]) / Layer7_StanDev_GPU[fltr];
    Z = Z * Layer7_Gamma_GPU[fltr] + Layer7_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer8_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSeventhLayer_PSC_partC(double *Layer7_Neurons_GPU,
                    double *Layer7_Weights_GPU,
                    double *Layer8_Neurons_GPU,
                    double *Layer7_Mean_GPU,
                    double *Layer7_StanDev_GPU,
                    double *Layer7_Gamma_GPU,
                    double *Layer7_Beta_GPU
                )
{
    // Layer 7 pointwise (1x1) convolution, part C: row index shifted down
    // by 32 (output skips 57*32 entries, input skips 56*32).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 57 * 57 + 57 * 32 + threadIdx.x * 57 + threadIdx.y;
    const int wgt     = fltr * 128;
    const int in_idx  = 56 * 32 + threadIdx.x * 56 + threadIdx.y;
    // Dot product across all 128 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 128; ch++)
        acc += Layer7_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer7_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer7_Mean_GPU[fltr]) / Layer7_StanDev_GPU[fltr];
    Z = Z * Layer7_Gamma_GPU[fltr] + Layer7_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer8_Neurons_GPU[out_idx] = Z;
}
__global__ void executeSeventhLayer_PSC_partD(double *Layer7_Neurons_GPU,
                    double *Layer7_Weights_GPU,
                    double *Layer8_Neurons_GPU,
                    double *Layer7_Mean_GPU,
                    double *Layer7_StanDev_GPU,
                    double *Layer7_Gamma_GPU,
                    double *Layer7_Beta_GPU
                )
{
    // Layer 7 pointwise (1x1) convolution, part D: bottom-right tile,
    // both row and column indices shifted by 32.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 57 * 57 + 57 * 32 + 32 + threadIdx.x * 57 + threadIdx.y;
    const int wgt     = fltr * 128;
    const int in_idx  = 56 * 32 + 32 + threadIdx.x * 56 + threadIdx.y;
    // Dot product across all 128 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 128; ch++)
        acc += Layer7_Neurons_GPU[ch * 56 * 56 + in_idx]
             * Layer7_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer7_Mean_GPU[fltr]) / Layer7_StanDev_GPU[fltr];
    Z = Z * Layer7_Gamma_GPU[fltr] + Layer7_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer8_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** SEVENTH LAYER END **************************************************** */
/* *************************************************** EIGHTH LAYER START ************************************************** */
/*
Layer 8: Depthwise Separable Convolution Layer
Input: 57 * 57 * 128
Weight: 3 * 3 * 128 with a Stride of 2
Output: 28 * 28 * 128
*/
__global__ void executeEighthLayer_DSC(double *Layer8_Neurons_GPU,
                    double *Layer8_Weights_GPU,
                    double *Layer9_Neurons_GPU,
                    double *Layer8_Mean_GPU,
                    double *Layer8_StanDev_GPU,
                    double *Layer8_Gamma_GPU,
                    double *Layer8_Beta_GPU
                )
{
    // Layer 8 depthwise 3x3 convolution with stride 2 on a 57*57 input,
    // producing a 28*28 output per channel (blockIdx.x).
    const int STRIDE  = 2;
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 28 * 28 + threadIdx.x * 28 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 57 * STRIDE + threadIdx.y * STRIDE;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 57 * 57 + in_idx + r * 57;
        const int w_row  = wgt + r * 3;
        acc += Layer8_Neurons_GPU[in_row]     * Layer8_Weights_GPU[w_row]
             + Layer8_Neurons_GPU[in_row + 1] * Layer8_Weights_GPU[w_row + 1]
             + Layer8_Neurons_GPU[in_row + 2] * Layer8_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer8_Mean_GPU[fltr]) / Layer8_StanDev_GPU[fltr];
    Z = Z * Layer8_Gamma_GPU[fltr] + Layer8_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer9_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** EIGHTH LAYER END **************************************************** */
/* *************************************************** NINTH LAYER START ************************************************** */
/*
Layer 9: Pointwise Separable Convolution Layer
Input: 28 * 28 * 128
Weight: 1 * 1 * 128 * 256 with a Stride of 1
Output: 30 * 30 * 256 (Handling the padding for the next layer)
*/
__global__ void executeNinthLayer_PSC(double *Layer9_Neurons_GPU,
                    double *Layer9_Weights_GPU,
                    double *Layer10_Neurons_GPU,
                    double *Layer9_Mean_GPU,
                    double *Layer9_StanDev_GPU,
                    double *Layer9_Gamma_GPU,
                    double *Layer9_Beta_GPU
                )
{
    // Layer 9 pointwise (1x1) convolution over a 28*28*128 input.
    // Output is stored into a 30*30 map at offset 31 (= 30 + 1), i.e. one
    // padding row and column for the 3x3 depthwise layer 10.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 30 * 30 + threadIdx.x * 30 + threadIdx.y;
    const int wgt     = fltr * 128;                      // 128 weights per filter
    const int in_idx  = threadIdx.x * 28 + threadIdx.y;
    // Dot product across all 128 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 128; ch++)
        acc += Layer9_Neurons_GPU[ch * 28 * 28 + in_idx]
             * Layer9_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer9_Mean_GPU[fltr]) / Layer9_StanDev_GPU[fltr];
    Z = Z * Layer9_Gamma_GPU[fltr] + Layer9_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer10_Neurons_GPU[out_idx + 31] = Z;
}
/* *************************************************** NINTH LAYER END **************************************************** */
/* *************************************************** TENTH LAYER START ************************************************** */
/*
Layer 10: Depthwise Separable Convolution Layer
Input: 30 * 30 * 256
Weight: 3 * 3 * 256 with a Stride of 1
Output: 28 * 28 * 256
*/
__global__ void executeTenthLayer_DSC(double *Layer10_Neurons_GPU,
                    double *Layer10_Weights_GPU,
                    double *Layer11_Neurons_GPU,
                    double *Layer10_Mean_GPU,
                    double *Layer10_StanDev_GPU,
                    double *Layer10_Gamma_GPU,
                    double *Layer10_Beta_GPU
                )
{
    // Layer 10 depthwise 3x3 convolution, stride 1, on a 30*30 padded
    // input, producing a 28*28 output per channel (blockIdx.x).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 28 * 28 + threadIdx.x * 28 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 30 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 30 * 30 + in_idx + r * 30;
        const int w_row  = wgt + r * 3;
        acc += Layer10_Neurons_GPU[in_row]     * Layer10_Weights_GPU[w_row]
             + Layer10_Neurons_GPU[in_row + 1] * Layer10_Weights_GPU[w_row + 1]
             + Layer10_Neurons_GPU[in_row + 2] * Layer10_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer10_Mean_GPU[fltr]) / Layer10_StanDev_GPU[fltr];
    Z = Z * Layer10_Gamma_GPU[fltr] + Layer10_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer11_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** TENTH LAYER END **************************************************** */
/* *************************************************** ELEVENTH LAYER START ************************************************** */
/*
Layer 11: Pointwise Separable Convolution Layer
Input: 28 * 28 * 256
Weight: 1 * 1 * 256 * 256 with a Stride of 1
Output: 29 * 29 * 256 (Padding for the next layer is handled in this layer itself)
*/
__global__ void executeEleventhLayer_PSC(double *Layer11_Neurons_GPU,
                    double *Layer11_Weights_GPU,
                    double *Layer12_Neurons_GPU,
                    double *Layer11_Mean_GPU,
                    double *Layer11_StanDev_GPU,
                    double *Layer11_Gamma_GPU,
                    double *Layer11_Beta_GPU
                )
{
    // Layer 11 pointwise (1x1) convolution over a 28*28*256 input.
    // Output map is 29*29 (one extra row/column on the bottom/right for
    // the stride-2 layer 12); stored with no offset.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 29 * 29 + threadIdx.x * 29 + threadIdx.y;
    const int wgt     = fltr * 256;                      // 256 weights per filter
    const int in_idx  = threadIdx.x * 28 + threadIdx.y;
    // Dot product across all 256 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 256; ch++)
        acc += Layer11_Neurons_GPU[ch * 28 * 28 + in_idx]
             * Layer11_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer11_Mean_GPU[fltr]) / Layer11_StanDev_GPU[fltr];
    Z = Z * Layer11_Gamma_GPU[fltr] + Layer11_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer12_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** ELEVENTH LAYER END **************************************************** */
/* *************************************************** TWELFTH LAYER START ************************************************** */
/*
Layer 12: Depthwise Separable Convolution Layer
Input: 29 * 29 * 256
Weight: 3 * 3 * 256 with a Stride of 2
Output: 14 * 14 * 256
*/
__global__ void executeTwelfthLayer_DSC(double *Layer12_Neurons_GPU,
                    double *Layer12_Weights_GPU,
                    double *Layer13_Neurons_GPU,
                    double *Layer12_Mean_GPU,
                    double *Layer12_StanDev_GPU,
                    double *Layer12_Gamma_GPU,
                    double *Layer12_Beta_GPU
                )
{
    // Layer 12 depthwise 3x3 convolution with stride 2 on a 29*29 input,
    // producing a 14*14 output per channel (blockIdx.x).
    const int STRIDE  = 2;
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 14 * 14 + threadIdx.x * 14 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 29 * STRIDE + threadIdx.y * STRIDE;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 29 * 29 + in_idx + r * 29;
        const int w_row  = wgt + r * 3;
        acc += Layer12_Neurons_GPU[in_row]     * Layer12_Weights_GPU[w_row]
             + Layer12_Neurons_GPU[in_row + 1] * Layer12_Weights_GPU[w_row + 1]
             + Layer12_Neurons_GPU[in_row + 2] * Layer12_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer12_Mean_GPU[fltr]) / Layer12_StanDev_GPU[fltr];
    Z = Z * Layer12_Gamma_GPU[fltr] + Layer12_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer13_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** TWELFTH LAYER END **************************************************** */
/* *************************************************** THIRTEENTH LAYER START ************************************************** */
/*
Layer 13: Pointwise Separable Convolution Layer
Input: 14 * 14 * 256
Weight: 1 * 1 * 256 * 512 with a Stride of 1
Output: 16 * 16 * 512 (Handling padding for next layer)
*/
__global__ void executeThirteenthLayer_PSC(double *Layer13_Neurons_GPU,
                    double *Layer13_Weights_GPU,
                    double *Layer14_Neurons_GPU,
                    double *Layer13_Mean_GPU,
                    double *Layer13_StanDev_GPU,
                    double *Layer13_Gamma_GPU,
                    double *Layer13_Beta_GPU
                )
{
    // Layer 13 pointwise (1x1) convolution over a 14*14*256 input.
    // Output is stored into a 16*16 map at offset 17 (= 16 + 1), i.e. one
    // padding row and column for the 3x3 depthwise layer 14.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 16 * 16 + threadIdx.x * 16 + threadIdx.y;
    const int wgt     = fltr * 256;                      // 256 weights per filter
    const int in_idx  = threadIdx.x * 14 + threadIdx.y;
    // Dot product across all 256 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 256; ch++)
        acc += Layer13_Neurons_GPU[ch * 14 * 14 + in_idx]
             * Layer13_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer13_Mean_GPU[fltr]) / Layer13_StanDev_GPU[fltr];
    Z = Z * Layer13_Gamma_GPU[fltr] + Layer13_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer14_Neurons_GPU[out_idx + 17] = Z;
}
/* *************************************************** THIRTEENTH LAYER END **************************************************** */
/* *************************************************** FOURTEENTH LAYER START ************************************************** */
/*
Layer 14: Depthwise Separable Convolution Layer
Input: 16 * 16 * 512
Weight: 3 * 3 * 512 with a Stride of 1
    Output: 14 * 14 * 512 (no padding added here; output is stored without an offset)
*/
__global__ void executeFourteenthLayer_DSC(double *Layer14_Neurons_GPU,
                    double *Layer14_Weights_GPU,
                    double *Layer15_Neurons_GPU,
                    double *Layer14_Mean_GPU,
                    double *Layer14_StanDev_GPU,
                    double *Layer14_Gamma_GPU,
                    double *Layer14_Beta_GPU
                )
{
    // Layer 14 depthwise 3x3 convolution, stride 1, on a 16*16 padded
    // input, producing a 14*14 output per channel (blockIdx.x).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 14 * 14 + threadIdx.x * 14 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 16 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 16 * 16 + in_idx + r * 16;
        const int w_row  = wgt + r * 3;
        acc += Layer14_Neurons_GPU[in_row]     * Layer14_Weights_GPU[w_row]
             + Layer14_Neurons_GPU[in_row + 1] * Layer14_Weights_GPU[w_row + 1]
             + Layer14_Neurons_GPU[in_row + 2] * Layer14_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer14_Mean_GPU[fltr]) / Layer14_StanDev_GPU[fltr];
    Z = Z * Layer14_Gamma_GPU[fltr] + Layer14_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer15_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** FOURTEENTH LAYER END **************************************************** */
/* *************************************************** FIFTEENTH LAYER START ************************************************** */
/*
Layer 15: Pointwise Separable Convolution Layer
Input: 14 * 14 * 512
Weight: 1 * 1 * 512 * 512 with a Stride of 1
Output: 16 * 16 * 512 (Handling padding for next layer)
*/
__global__ void executeFifteenthLayer_PSC(double *Layer15_Neurons_GPU,
                    double *Layer15_Weights_GPU,
                    double *Layer16_Neurons_GPU,
                    double *Layer15_Mean_GPU,
                    double *Layer15_StanDev_GPU,
                    double *Layer15_Gamma_GPU,
                    double *Layer15_Beta_GPU
                )
{
    // Layer 15 pointwise (1x1) convolution over a 14*14*512 input.
    // Output is stored into a 16*16 map at offset 17 (= 16 + 1), i.e. one
    // padding row and column for the 3x3 depthwise layer 16.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 16 * 16 + threadIdx.x * 16 + threadIdx.y;
    const int wgt     = fltr * 512;                      // 512 weights per filter
    const int in_idx  = threadIdx.x * 14 + threadIdx.y;
    // Dot product across all 512 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 512; ch++)
        acc += Layer15_Neurons_GPU[ch * 14 * 14 + in_idx]
             * Layer15_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer15_Mean_GPU[fltr]) / Layer15_StanDev_GPU[fltr];
    Z = Z * Layer15_Gamma_GPU[fltr] + Layer15_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer16_Neurons_GPU[out_idx + 17] = Z;
}
/* *************************************************** FIFTEENTH LAYER END **************************************************** */
/* *************************************************** SIXTEENTH LAYER START ************************************************** */
/*
Layer 16: Depthwise Separable Convolution Layer
Input: 16 * 16 * 512
Weight: 3 * 3 * 512 with a Stride of 1
    Output: 14 * 14 * 512 (no padding added here; output is stored without an offset)
*/
__global__ void executeSixteenthLayer_DSC(double *Layer16_Neurons_GPU,
                    double *Layer16_Weights_GPU,
                    double *Layer17_Neurons_GPU,
                    double *Layer16_Mean_GPU,
                    double *Layer16_StanDev_GPU,
                    double *Layer16_Gamma_GPU,
                    double *Layer16_Beta_GPU
                )
{
    // Layer 16 depthwise 3x3 convolution, stride 1, on a 16*16 padded
    // input, producing a 14*14 output per channel (blockIdx.x).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 14 * 14 + threadIdx.x * 14 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 16 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 16 * 16 + in_idx + r * 16;
        const int w_row  = wgt + r * 3;
        acc += Layer16_Neurons_GPU[in_row]     * Layer16_Weights_GPU[w_row]
             + Layer16_Neurons_GPU[in_row + 1] * Layer16_Weights_GPU[w_row + 1]
             + Layer16_Neurons_GPU[in_row + 2] * Layer16_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer16_Mean_GPU[fltr]) / Layer16_StanDev_GPU[fltr];
    Z = Z * Layer16_Gamma_GPU[fltr] + Layer16_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer17_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** SIXTEENTH LAYER END **************************************************** */
/* *************************************************** SEVENTEENTH LAYER START ************************************************** */
/*
Layer 17: Pointwise Separable Convolution Layer
Input: 14 * 14 * 512
Weight: 1 * 1 * 512 * 512 with a Stride of 1
Output: 16 * 16 * 512 (Handling padding for next layer)
*/
__global__ void executeSeventeenthLayer_PSC(double *Layer17_Neurons_GPU,
                    double *Layer17_Weights_GPU,
                    double *Layer18_Neurons_GPU,
                    double *Layer17_Mean_GPU,
                    double *Layer17_StanDev_GPU,
                    double *Layer17_Gamma_GPU,
                    double *Layer17_Beta_GPU
                )
{
    // Layer 17 pointwise (1x1) convolution over a 14*14*512 input.
    // Output is stored into a 16*16 map at offset 17 (= 16 + 1), i.e. one
    // padding row and column for the 3x3 depthwise layer 18.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 16 * 16 + threadIdx.x * 16 + threadIdx.y;
    const int wgt     = fltr * 512;                      // 512 weights per filter
    const int in_idx  = threadIdx.x * 14 + threadIdx.y;
    // Dot product across all 512 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 512; ch++)
        acc += Layer17_Neurons_GPU[ch * 14 * 14 + in_idx]
             * Layer17_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer17_Mean_GPU[fltr]) / Layer17_StanDev_GPU[fltr];
    Z = Z * Layer17_Gamma_GPU[fltr] + Layer17_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer18_Neurons_GPU[out_idx + 17] = Z;
}
/* *************************************************** SEVENTEENTH LAYER END **************************************************** */
/* *************************************************** EIGHTEENTH LAYER START ************************************************** */
/*
Layer 18: Depthwise Separable Convolution Layer
Input: 16 * 16 * 512
Weight: 3 * 3 * 512 with a Stride of 1
Output: 14 * 14 * 512
*/
__global__ void executeEighteenthLayer_DSC(double *Layer18_Neurons_GPU,
                    double *Layer18_Weights_GPU,
                    double *Layer19_Neurons_GPU,
                    double *Layer18_Mean_GPU,
                    double *Layer18_StanDev_GPU,
                    double *Layer18_Gamma_GPU,
                    double *Layer18_Beta_GPU
                )
{
    // Layer 18 depthwise 3x3 convolution, stride 1, on a 16*16 padded
    // input, producing a 14*14 output per channel (blockIdx.x).
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 14 * 14 + threadIdx.x * 14 + threadIdx.y;
    const int wgt     = fltr * 9;
    const int in_idx  = threadIdx.x * 16 + threadIdx.y;
    // 3x3 window: loop over rows, the three columns unrolled.
    double acc = 0.0;
    for (int r = 0; r < 3; r++)
    {
        const int in_row = fltr * 16 * 16 + in_idx + r * 16;
        const int w_row  = wgt + r * 3;
        acc += Layer18_Neurons_GPU[in_row]     * Layer18_Weights_GPU[w_row]
             + Layer18_Neurons_GPU[in_row + 1] * Layer18_Weights_GPU[w_row + 1]
             + Layer18_Neurons_GPU[in_row + 2] * Layer18_Weights_GPU[w_row + 2];
    }
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer18_Mean_GPU[fltr]) / Layer18_StanDev_GPU[fltr];
    Z = Z * Layer18_Gamma_GPU[fltr] + Layer18_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer19_Neurons_GPU[out_idx] = Z;
}
/* *************************************************** EIGHTEENTH LAYER END **************************************************** */
/* *************************************************** NINETEENTH LAYER START ************************************************** */
/*
Layer 19: Pointwise Separable Convolution Layer
Input: 14 * 14 * 512
Weight: 1 * 1 * 512 * 512 with a Stride of 1
Output: 16 * 16 * 512 (Handling padding for next layer)
*/
__global__ void executeNineteenthLayer_PSC(double *Layer19_Neurons_GPU,
                    double *Layer19_Weights_GPU,
                    double *Layer20_Neurons_GPU,
                    double *Layer19_Mean_GPU,
                    double *Layer19_StanDev_GPU,
                    double *Layer19_Gamma_GPU,
                    double *Layer19_Beta_GPU
                )
{
    // Layer 19 pointwise (1x1) convolution over a 14*14*512 input.
    // Output is stored into a 16*16 map at offset 17 (= 16 + 1), i.e. one
    // padding row and column for the 3x3 depthwise layer 20.
    const int fltr    = blockIdx.x;
    const int out_idx = fltr * 16 * 16 + threadIdx.x * 16 + threadIdx.y;
    const int wgt     = fltr * 512;                      // 512 weights per filter
    const int in_idx  = threadIdx.x * 14 + threadIdx.y;
    // Dot product across all 512 input channels.
    double acc = 0.0;
    for (int ch = 0; ch < 512; ch++)
        acc += Layer19_Neurons_GPU[ch * 14 * 14 + in_idx]
             * Layer19_Weights_GPU[wgt + ch];
    // Batch normalisation, then ReLU6 clamp.
    double Z = (acc - Layer19_Mean_GPU[fltr]) / Layer19_StanDev_GPU[fltr];
    Z = Z * Layer19_Gamma_GPU[fltr] + Layer19_Beta_GPU[fltr];
    Z = (Z < 0) ? 0 : Z;
    Z = (Z > 6) ? 6.0 : Z;
    Layer20_Neurons_GPU[out_idx + 17] = Z;
}
/* *************************************************** NINETEENTH LAYER END **************************************************** */
/* *************************************************** TWENTY LAYER START ************************************************** */
/*
Layer 20: Depthwise Separable Convolution Layer
Input: 16 * 16 * 512
Weight: 3 * 3 * 512 with a Stride of 1
Output: 14 * 14 * 512
*/
/*
    Layer 20: 3x3 depthwise convolution (stride 1) + BatchNorm + ReLU6.
    blockIdx.x selects one of the 512 channels; threadIdx.(x,y) selects one
    of the 14x14 output positions. Reads this channel's 16x16 zero-padded
    input plane and writes an unpadded 14x14 output plane.
*/
__global__ void executeTwentyLayer_DSC(double *Layer20_Neurons_GPU,
    double *Layer20_Weights_GPU,
    double *Layer21_Neurons_GPU,
    double *Layer20_Mean_GPU,
    double *Layer20_StanDev_GPU,
    double *Layer20_Gamma_GPU,
    double *Layer20_Beta_GPU
)
{
    const int filter_number = blockIdx.x;
    const int output_Position = (filter_number * 14 * 14)
                              + (threadIdx.x * 14)
                              + (threadIdx.y);
    const int weight_Position = filter_number * 9;       // one 3x3 kernel per channel
    const int input_Position = (threadIdx.x * 16) + (threadIdx.y);
    const int plane = filter_number * 16 * 16;           // base of this channel's input plane

    // 3x3 window: walk the three kernel rows, accumulating the three columns of each.
    double product = 0.0;
    for (int row = 0; row < 3; row++)
    {
        const int in = plane + input_Position + (row * 16);
        const int wt = weight_Position + (row * 3);
        product += (Layer20_Neurons_GPU[in]     * Layer20_Weights_GPU[wt])
                 + (Layer20_Neurons_GPU[in + 1] * Layer20_Weights_GPU[wt + 1])
                 + (Layer20_Neurons_GPU[in + 2] * Layer20_Weights_GPU[wt + 2]);
    }

    // Batch normalization with per-channel statistics, then ReLU6.
    double Z = (product - Layer20_Mean_GPU[filter_number]) / Layer20_StanDev_GPU[filter_number];
    Z = (Z * Layer20_Gamma_GPU[filter_number]) + Layer20_Beta_GPU[filter_number];
    Z = (Z < 0) ? 0 : Z;      // ReLU: max(0, Z)
    Z = (Z > 6) ? 6.0 : Z;    // ReLU6 cap
    Layer21_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTY LAYER END **************************************************** */
/* *************************************************** TWENTYONE LAYER START ************************************************** */
/*
Layer 21: Pointwise Separable Convolution Layer
Input: 14 * 14 * 512
Weight: 1 * 1 * 512 * 512 with a Stride of 1
Output: 16 * 16 * 512 (Handling padding for next layer)
*/
/*
    Layer 21: 1x1 (pointwise) convolution + BatchNorm + ReLU6.
    Launch layout (implied by the indexing): blockIdx.x selects one of the
    512 output filters; threadIdx.(x,y) selects one of the 14x14 spatial
    positions. The result is written into a 16x16-per-channel buffer at
    offset 17 (= one 16-wide row + one column), leaving a zero border for
    the following 3x3 depthwise layer.
*/
__global__ void executeTwentyOneLayer_PSC(double *Layer21_Neurons_GPU,
    double *Layer21_Weights_GPU,
    double *Layer22_Neurons_GPU,
    double *Layer21_Mean_GPU,
    double *Layer21_StanDev_GPU,
    double *Layer21_Gamma_GPU,
    double *Layer21_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    int offset = 17;                // 16 (skip padded row 0) + 1 (skip padded column 0)
    // Output position
    int output_Position = (filter_number * 16 * 16) // channel to work with
                        + (threadIdx.x * 16)
                        + (threadIdx.y);
    int weight_Position = filter_number * 512;      // 512 weights per output filter
    int input_Position = (threadIdx.x * 14)
                       + (threadIdx.y);
    for(int channel = 0; channel < 512 ; channel++) // dot product across all 512 input channels
    {
        product += (Layer21_Neurons_GPU[(channel * 14 * 14) + input_Position] * Layer21_Weights_GPU[weight_Position + channel]);
    }
    // Batch normalization with per-filter statistics
    double Z = (product - Layer21_Mean_GPU[filter_number]) / Layer21_StanDev_GPU[filter_number];
    Z = (Z * Layer21_Gamma_GPU[filter_number]) + Layer21_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer22_Neurons_GPU[output_Position + offset] = Z;
}
/* *************************************************** TWENTYONE LAYER END **************************************************** */
/* *************************************************** TWENTYTWO LAYER START ************************************************** */
/*
Layer 22: Depthwise Separable Convolution Layer
Input: 16 * 16 * 512
Weight: 3 * 3 * 512 with a Stride of 1
Output: 14 * 14 * 512
*/
/*
    Layer 22: 3x3 depthwise convolution (stride 1) + BatchNorm + ReLU6.
    blockIdx.x selects one of the 512 channels; threadIdx.(x,y) selects one
    of the 14x14 output positions. Reads this channel's 16x16 zero-padded
    input plane and writes an unpadded 14x14 output plane.
*/
__global__ void executeTwentyTwoLayer_DSC(double *Layer22_Neurons_GPU,
    double *Layer22_Weights_GPU,
    double *Layer23_Neurons_GPU,
    double *Layer22_Mean_GPU,
    double *Layer22_StanDev_GPU,
    double *Layer22_Gamma_GPU,
    double *Layer22_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    // Output position
    int output_Position = (filter_number * 14 * 14) // channel to work with
                        + (threadIdx.x * 14)
                        + (threadIdx.y);
    int weight_Position = filter_number * 9;        // one 3x3 kernel per channel
    int input_Position = (threadIdx.x * 16)
                       + (threadIdx.y);
    for(int row = 0; row < 3; row++) // 3x3 window: one iteration per kernel row
    {
        product += ((Layer22_Neurons_GPU[(filter_number * 16 * 16) + input_Position + (row * 16)] * Layer22_Weights_GPU[weight_Position + (row * 3)])
                + (Layer22_Neurons_GPU[(filter_number * 16 * 16) + input_Position + (row * 16) + 1] * Layer22_Weights_GPU[weight_Position + (row * 3) + 1])
                + (Layer22_Neurons_GPU[(filter_number * 16 * 16) + input_Position + (row * 16) + 2] * Layer22_Weights_GPU[weight_Position + (row * 3) + 2]));
    }
    // Batch normalization with per-channel statistics
    double Z = (product - Layer22_Mean_GPU[filter_number]) / Layer22_StanDev_GPU[filter_number];
    Z = (Z * Layer22_Gamma_GPU[filter_number]) + Layer22_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer23_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTYTWO LAYER END **************************************************** */
/* *************************************************** TWENTYTHREE LAYER START ************************************************** */
/*
Layer 23: Pointwise Separable Convolution Layer
Input: 14 * 14 * 512
Weight: 1 * 1 * 512 * 512 with a Stride of 1
Output: 15 * 15 * 512 (the kernel indexes 15-wide output planes; the untouched last row/column act as padding for the next stride-2 layer)
*/
/*
    Layer 23: 1x1 (pointwise) convolution + BatchNorm + ReLU6.
    blockIdx.x selects one of the 512 output filters; threadIdx.(x,y)
    selects one of the 14x14 spatial positions. Note the output planes are
    15 wide (indexing below uses 15*15 / *15) with no start offset: the
    last row and column are left untouched as padding for the following
    stride-2 depthwise layer, which reads 15x15 planes.
*/
__global__ void executeTwentyThreeLayer_PSC(double *Layer23_Neurons_GPU,
    double *Layer23_Weights_GPU,
    double *Layer24_Neurons_GPU,
    double *Layer23_Mean_GPU,
    double *Layer23_StanDev_GPU,
    double *Layer23_Gamma_GPU,
    double *Layer23_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    // Output position (inside a 15x15 plane -- see header comment)
    int output_Position = (filter_number * 15 * 15) // channel to work with
                        + (threadIdx.x * 15)
                        + (threadIdx.y);
    int weight_Position = filter_number * 512;      // 512 weights per output filter
    int input_Position = (threadIdx.x * 14)
                       + (threadIdx.y);
    for(int channel = 0; channel < 512 ; channel++) // dot product across all 512 input channels
    {
        product += (Layer23_Neurons_GPU[(channel * 14 * 14) + input_Position] * Layer23_Weights_GPU[weight_Position + channel]);
    }
    // Batch normalization with per-filter statistics
    double Z = (product - Layer23_Mean_GPU[filter_number]) / Layer23_StanDev_GPU[filter_number];
    Z = (Z * Layer23_Gamma_GPU[filter_number]) + Layer23_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer24_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTYTHREE LAYER END **************************************************** */
/* *************************************************** TWENTYFOUR LAYER START ************************************************** */
/*
Layer 24: Depthwise Separable Convolution Layer
Input: 15 * 15 * 512
Weight: 3 * 3 * 512 with a Stride of 2
Output: 7 * 7 * 512 (the kernel writes 7-wide output planes)
*/
/*
    Layer 24: 3x3 depthwise convolution with stride 2 + BatchNorm + ReLU6.
    blockIdx.x selects one of the 512 channels; threadIdx.(x,y) in [0,7)^2
    selects an output position. Input planes are 15x15; output planes 7x7.
*/
__global__ void executeTwentyFourLayer_DSC(double *Layer24_Neurons_GPU,
    double *Layer24_Weights_GPU,
    double *Layer25_Neurons_GPU,
    double *Layer24_Mean_GPU,
    double *Layer24_StanDev_GPU,
    double *Layer24_Gamma_GPU,
    double *Layer24_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    int stride = 2;
    // Output position
    int output_Position = (filter_number * 7 * 7) // channel to work with
                        + (threadIdx.x * 7)
                        + (threadIdx.y);
    int weight_Position = filter_number * 9;      // one 3x3 kernel per channel
    // Top-left corner of this output's 3x3 input window: each output step
    // moves 2 rows/columns in the 15-wide input plane.
    int input_Position = (threadIdx.x * 15 * stride)
                       + (threadIdx.y * stride);
    for(int row = 0; row < 3; row++) // 3x3 window: one iteration per kernel row
    {
        product += ((Layer24_Neurons_GPU[(filter_number * 15 * 15) + input_Position + (row * 15)] * Layer24_Weights_GPU[weight_Position + (row * 3)])
                + (Layer24_Neurons_GPU[(filter_number * 15 * 15) + input_Position + (row * 15) + 1] * Layer24_Weights_GPU[weight_Position + (row * 3) + 1])
                + (Layer24_Neurons_GPU[(filter_number * 15 * 15) + input_Position + (row * 15) + 2] * Layer24_Weights_GPU[weight_Position + (row * 3) + 2]));
    }
    // Batch normalization with per-channel statistics
    double Z = (product - Layer24_Mean_GPU[filter_number]) / Layer24_StanDev_GPU[filter_number];
    Z = (Z * Layer24_Gamma_GPU[filter_number]) + Layer24_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer25_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTYFOUR LAYER END **************************************************** */
/* *************************************************** TWENTYFIVE LAYER START ************************************************** */
/*
Layer 25: Pointwise Separable Convolution Layer
Input: 7 * 7 * 512
Weight: 1 * 1 * 512 * 1024 with a Stride of 1
Output: 9 * 9 * 1024 (Handling padding for next layer)
*/
/*
    Layer 25: 1x1 (pointwise) convolution + BatchNorm + ReLU6.
    blockIdx.x selects one of the 1024 output filters; threadIdx.(x,y) in
    [0,7)^2 selects a spatial position. The result is written into a
    9x9-per-channel buffer at offset 10 (= one 9-wide row + one column),
    leaving a zero border for the following 3x3 depthwise layer.
*/
__global__ void executeTwentyFiveLayer_PSC(double *Layer25_Neurons_GPU,
    double *Layer25_Weights_GPU,
    double *Layer26_Neurons_GPU,
    double *Layer25_Mean_GPU,
    double *Layer25_StanDev_GPU,
    double *Layer25_Gamma_GPU,
    double *Layer25_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    int offset = 10;                // 9 (skip padded row 0) + 1 (skip padded column 0)
    // Output position
    int output_Position = (filter_number * 9 * 9) // channel to work with
                        + (threadIdx.x * 9)
                        + (threadIdx.y);
    int weight_Position = filter_number * 512;    // 512 weights per output filter
    int input_Position = (threadIdx.x * 7)
                       + (threadIdx.y);
    for(int channel = 0; channel < 512 ; channel++) // dot product across all 512 input channels
    {
        product += (Layer25_Neurons_GPU[(channel * 7 * 7) + input_Position] * Layer25_Weights_GPU[weight_Position + channel]);
    }
    // Batch normalization with per-filter statistics
    double Z = (product - Layer25_Mean_GPU[filter_number]) / Layer25_StanDev_GPU[filter_number];
    Z = (Z * Layer25_Gamma_GPU[filter_number]) + Layer25_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer26_Neurons_GPU[output_Position + offset] = Z;
}
/* *************************************************** TWENTYFIVE LAYER END **************************************************** */
/* *************************************************** TWENTYSIX LAYER START ************************************************** */
/*
Layer 26: Depthwise Separable Convolution Layer
Input: 9 * 9 * 1024
Weight: 3 * 3 * 1024 with a Stride of 1
Output: 7 * 7 * 1024 (Handling padding for next layer)
*/
/*
    Layer 26: 3x3 depthwise convolution (stride 1) + BatchNorm + ReLU6.
    blockIdx.x selects one of the 1024 channels; threadIdx.(x,y) in [0,7)^2
    selects an output position. Reads this channel's 9x9 zero-padded input
    plane and writes an unpadded 7x7 output plane.
*/
__global__ void executeTwentySixLayer_DSC(double *Layer26_Neurons_GPU,
    double *Layer26_Weights_GPU,
    double *Layer27_Neurons_GPU,
    double *Layer26_Mean_GPU,
    double *Layer26_StanDev_GPU,
    double *Layer26_Gamma_GPU,
    double *Layer26_Beta_GPU
)
{
    double product = 0.0;
    int filter_number = blockIdx.x;
    // Output position
    int output_Position = (filter_number * 7 * 7) // channel to work with
                        + (threadIdx.x * 7)
                        + (threadIdx.y);
    int weight_Position = filter_number * 9;      // one 3x3 kernel per channel
    int input_Position = (threadIdx.x * 9)
                       + (threadIdx.y);
    for(int row = 0; row < 3; row++) // 3x3 window: one iteration per kernel row
    {
        product += ((Layer26_Neurons_GPU[(filter_number * 9 * 9) + input_Position + (row * 9)] * Layer26_Weights_GPU[weight_Position + (row * 3)])
                + (Layer26_Neurons_GPU[(filter_number * 9 * 9) + input_Position + (row * 9) + 1] * Layer26_Weights_GPU[weight_Position + (row * 3) + 1])
                + (Layer26_Neurons_GPU[(filter_number * 9 * 9) + input_Position + (row * 9) + 2] * Layer26_Weights_GPU[weight_Position + (row * 3) + 2]));
    }
    // Batch normalization with per-channel statistics
    double Z = (product - Layer26_Mean_GPU[filter_number]) / Layer26_StanDev_GPU[filter_number];
    Z = (Z * Layer26_Gamma_GPU[filter_number]) + Layer26_Beta_GPU[filter_number];
    // ReLU Layer
    if(Z < 0)
        Z = 0; // max(0,x)
    // ReLU 6 Layer
    if(Z > 6)
        Z = 6.0;
    Layer27_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTYSIX LAYER END **************************************************** */
/* *************************************************** TWENTYSEVEN LAYER START ************************************************** */
/*
Layer 27: Pointwise Separable Convolution Layer
Input: 7 * 7 * 1024
Weight: 1 * 1 * 1024 * 1024 with a Stride of 1
Output: 7 * 7 * 1024
*/
/*
    Layer 27: 1x1 (pointwise) convolution + BatchNorm + ReLU6.
    blockIdx.x selects one of the 1024 output filters; threadIdx.(x,y) in
    [0,7)^2 selects a spatial position. Output planes are 7x7 with no
    padding offset (the next layer is global average pooling).
*/
__global__ void executeTwentySevenLayer_PSC(double *Layer27_Neurons_GPU,
    double *Layer27_Weights_GPU,
    double *Layer28_Neurons_GPU,
    double *Layer27_Mean_GPU,
    double *Layer27_StanDev_GPU,
    double *Layer27_Gamma_GPU,
    double *Layer27_Beta_GPU
)
{
    const int filter_number = blockIdx.x;
    const int output_Position = (filter_number * 7 * 7)
                              + (threadIdx.x * 7)
                              + (threadIdx.y);
    const int weight_Position = filter_number * 1024;    // 1024 weights per output filter
    const int input_Position = (threadIdx.x * 7) + (threadIdx.y);

    // 1x1 convolution: dot product across all 1024 input channels.
    double product = 0.0;
    for (int channel = 0; channel < 1024; channel++)
        product += Layer27_Neurons_GPU[(channel * 7 * 7) + input_Position]
                 * Layer27_Weights_GPU[weight_Position + channel];

    // Batch normalization with per-filter statistics, then ReLU6.
    double Z = (product - Layer27_Mean_GPU[filter_number]) / Layer27_StanDev_GPU[filter_number];
    Z = (Z * Layer27_Gamma_GPU[filter_number]) + Layer27_Beta_GPU[filter_number];
    Z = (Z < 0) ? 0 : Z;      // ReLU: max(0, Z)
    Z = (Z > 6) ? 6.0 : Z;    // ReLU6 cap
    Layer28_Neurons_GPU[output_Position] = Z;
}
/* *************************************************** TWENTYSEVEN LAYER END **************************************************** */
/* *************************************************** TWENTYEIGHT LAYER START ************************************************** */
/*
Layer 28: Global Average Pooling Layer
Input: 7 * 7 * 1024
Weight: None
Output: 1 * 1 * 1024
*/
/*
    Layer 28: global average pooling over each 7x7 channel plane.
    The thread block enumerates channels as threadIdx.x * 32 + threadIdx.y
    (a 32x32 block covers 1024 channels); each thread averages one plane
    into a single output value.
*/
__global__ void executeTwentyEightLayer_AvgPooling(double *Layer28_Neurons_GPU,
    double *Layer29_Neurons_GPU
)
{
    const int filter_number = threadIdx.x * 32 + threadIdx.y;
    const int base = filter_number * 49;    // start of this channel's 7x7 plane

    // Accumulate all 49 plane values in row-major order.
    double sum = 0.0;
    for (int idx = 0; idx < 49; idx++)
        sum += Layer28_Neurons_GPU[base + idx];

    Layer29_Neurons_GPU[filter_number] = sum / 49;
}
/* *************************************************** TWENTYEIGHT LAYER END **************************************************** */
/* *************************************************** TWENTYNINE LAYER START ************************************************** */
/*
Layer 29: Fully Connected Layer
Input: 1 * 1 * 1024
Weight: 1000 * 1024
Bias: 1000
Output: 1000
*/
/*
    Layer 29: fully connected layer producing the class logits.
    One thread per output class (threadIdx.x): each computes the dot
    product of the 1024-element feature vector with its weight row and
    adds the per-class bias. No activation is applied here.
*/
__global__ void executeTwentyNineLayer_FullyConnected(double *Layer29_Neurons_GPU,
    double *Layer30_Neurons_GPU,
    double *Layer29_Weights_GPU,
    double *Layer29_Bias_GPU
)
{
    const int filter_number = threadIdx.x;
    const int weight_Position = filter_number * 1024;    // weight row for this class

    double product = 0.0;
    for (int channel = 0; channel < 1024; channel++)
        product += Layer29_Neurons_GPU[channel]
                 * Layer29_Weights_GPU[weight_Position + channel];

    // Add the bias and store the raw logit.
    Layer30_Neurons_GPU[filter_number] = product + Layer29_Bias_GPU[filter_number];
}
/* *************************************************** TWENTYNINE LAYER END **************************************************** */
|
6,272 | #define I(d,i,j) (i)*(d)+(j)
// Square grid used by the multigrid kernels below; values are stored
// flattened in row-major order and indexed with the I(d,i,j) macro.
typedef struct{
    float *v;   // flattened d x d values, row-major
    int d;      // side length of the square grid
    int size;   // allocated element count -- presumably d*d; TODO confirm at allocation site
} Grid;
// Zero every entry of the d x d grid (one thread per entry).
// BUG FIX: the original guard was "if(1<=m.d && j<=m.d)" -- the first
// clause is (almost) always true so i was never bounded, and "j<=m.d"
// permitted j == m.d, one past the last valid column of a d-wide row.
// The guard now matches the whole-grid bounds used by suma()/interpola().
__global__ void cero(Grid m){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i<m.d && j<m.d)
        m.v[I(m.d,i,j)]=0.0;
}
// Fill the interior of the grid with a fixed, deterministic
// pseudo-random-looking pattern; boundary values are left untouched.
__global__ void random(Grid m){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool interior = (i > 0) && (j > 0) && (i < m.d - 1) && (j < m.d - 1);
    if (interior)
        m.v[I(m.d, i, j)] = 10.1 + sinf(i + cosf(j));
}
// "Red" half-sweep of red-black Gauss-Seidel smoothing on the unit square
// with mesh spacing h = 1/(d-1): interior points with (i+j) even are
// updated as u = (f*h^2 + sum of the four neighbours) / 4.
__global__ void suaviza_r(Grid u, Grid f){
    double h2 = pow(1.0/(u.d-1),2);   // h^2
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i<u.d-1 && j<u.d-1 && i>0 && j>0){ // Interior points
        if((i+j)%2==0)                    // "red" points only
            u.v[I(u.d,i,j)]=0.25*(f.v[I(u.d,i ,j )]*h2
                +u.v[I(u.d,i-1,j )]
                +u.v[I(u.d,i+1,j )]
                +u.v[I(u.d,i ,j-1)]
                +u.v[I(u.d,i ,j+1)]);
    }
}
// "Black" half-sweep of red-black Gauss-Seidel smoothing: identical to
// suaviza_r but updates interior points with (i+j) odd.
__global__ void suaviza_n(Grid u, Grid f){
    double h2 = pow(1.0/(u.d-1),2);   // h^2 for mesh spacing h = 1/(d-1)
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i<u.d-1 && j<u.d-1 && i>0 && j>0){ // Interior points
        if((i+j)%2==1)                    // "black" points only
            u.v[I(u.d,i,j)]=0.25*(f.v[I(u.d,i ,j )]*h2
                +u.v[I(u.d,i-1,j )]
                +u.v[I(u.d,i+1,j )]
                +u.v[I(u.d,i ,j-1)]
                +u.v[I(u.d,i ,j+1)]);
    }
}
// Residual (defect) of the 5-point Laplacian stencil on interior points:
// d = f - (4*u - neighbours)/h^2. Boundary entries of d are not written.
__global__ void defecto(Grid u, Grid f, Grid d){
    double h2 = pow(1.0/(u.d-1),2);   // h^2 for mesh spacing h = 1/(d-1)
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    bool interior = i > 0 && j > 0 && i < u.d-1 && j < u.d-1;
    if(!interior)
        return;
    d.v[I(u.d,i,j)] = f.v[I(u.d,i ,j )]
                    -(4*u.v[I(u.d,i ,j )]
                      -u.v[I(u.d,i-1,j )]
                      -u.v[I(u.d,i+1,j )]
                      -u.v[I(u.d,i ,j-1)]
                      -u.v[I(u.d,i ,j+1)])/h2;
}
// Full-weighting restriction (fine grid `sup` -> coarse grid `in`):
// each coarse interior point (i,j) gets the 9-point weighted average of
// the fine points around (2i,2j) with weights 4 (center), 2 (edges),
// 1 (corners), normalized by 16.
// BUG FIX: the last corner term read sup(2*i+1, 2*j+2) instead of the
// symmetric corner sup(2*i+1, 2*j+1), which both broke the stencil and
// read one column too far to the right.
__global__ void restringe(Grid sup, Grid in){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i<in.d-1 && j<in.d-1 && i>0 && j>0){ //Interior points
        in.v[I(in.d,i,j)] = (4* sup.v[I(sup.d,2*i ,2*j )]
                            +2*(sup.v[I(sup.d,2*i-1,2*j )]
                               +sup.v[I(sup.d,2*i+1,2*j )]
                               +sup.v[I(sup.d,2*i ,2*j-1)]
                               +sup.v[I(sup.d,2*i ,2*j+1)])
                            +sup.v[I(sup.d,2*i-1,2*j-1)]
                            +sup.v[I(sup.d,2*i-1,2*j+1)]
                            +sup.v[I(sup.d,2*i+1,2*j-1)]
                            +sup.v[I(sup.d,2*i+1,2*j+1)])/16;
    }
}
// Direct solve on the coarsest grid, which has a single interior unknown
// at (1,1): u = f/16 (the 5-point stencil with h = 1/2 reduces to this).
__global__ void exacta(Grid u, Grid f){
    const int center = I(u.d, 1, 1);
    u.v[center] = f.v[center] / 16;
}
// Bilinear prolongation (coarse grid u -> fine grid v): coarse point (i,j)
// maps to fine (2i,2j); odd fine rows/columns are averaged from the two
// bracketing coarse values, and odd-odd points from the four surrounding
// coarse values. The "2*i+1 < v.d" / "2*j+1 < v.d" guards keep the writes
// in range and (for v.d == 2*u.d - 1) also keep the i+1 / j+1 reads valid.
__global__ void interpola(Grid u, Grid v){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if(i<u.d && j<u.d){
        // Coincident point: copy straight across.
        v.v[I(v.d,2*i,2*j)] = u.v[I(u.d,i,j)];
        // Odd row: average vertically.
        if(2*i+1<v.d)
            v.v[I(v.d,2*i+1,2*j )] = (u.v[I(u.d,i,j)]+u.v[I(u.d,i+1,j)])/2;
        // Odd column: average horizontally.
        if(2*j+1<v.d)
            v.v[I(v.d,2*i ,2*j+1)] = (u.v[I(u.d,i,j)]+u.v[I(u.d,i ,j+1)])/2;
        // Odd-odd point: average the four surrounding coarse values.
        if(2*i+1<v.d && 2*j+1<v.d)
            v.v[I(v.d,2*i+1, 2*j+1)] = (u.v[I(u.d,i,j)]+u.v[I(u.d,i+1,j )]
                + u.v[I(u.d,i, j+1)]+u.v[I(u.d, i+1, j+1)])/4;
    }
}
// Pointwise accumulate u += v over the whole grid (v is indexed with u.d,
// so both grids are assumed to have the same dimension).
__global__ void suma(Grid u, Grid v){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= u.d || j >= u.d)
        return;
    u.v[I(u.d, i, j)] += v.v[I(u.d, i, j)];
}
// Row-wise max norm: def[i] = max_j |d(i,j)| for each grid row i.
// BUG FIXES:
//  - the global row index was "blockIdx.x + blockDim.x + threadIdx.x"
//    (additions instead of the standard blockIdx.x * blockDim.x), so most
//    threads computed the wrong row and rows were skipped/duplicated;
//  - abs() on a float resolves to the integer overload in C, truncating
//    the value -- use fabs();
//  - added a bounds guard so excess threads neither read nor write out of
//    range; the max is accumulated in a register and written once.
__global__ void maxx(Grid d, double *def)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= d.d)
        return;
    double best = 0.0;
    for (int j = 0; j < d.d; j++){
        double mag = fabs((double)d.v[I(d.d,i,j)]);
        if (mag > best)
            best = mag;
    }
    def[i] = best;
}
|
6,273 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#define SIZ 1024
// Histogram kernel: each thread inspects one element of datarr and
// atomically increments that value's bucket in countarr.
// Assumes every datarr[i] lies in [0, number-of-buckets) -- presumably
// max_val at the call site; TODO confirm the caller guarantees this.
__global__
void countnum(int* countarr, int* datarr, int n){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    atomicAdd(&countarr[datarr[idx]], 1);
}
/*__global__
void countbignum(int* countarr,int* datarr,int n){
int offset=1;
if(offset<n)
{
int num=datarr[offset];
atomicAdd(&countarr[num],1);
}
}*/
// Counting sort: build the value histogram on the GPU, then rebuild the
// array on the host by expanding the counts in ascending value order.
//   arr     : in/out host array of `size` ints, each assumed in [0, max_val)
//   size    : number of elements
//   max_val : exclusive upper bound on values (number of histogram buckets)
__host__ void counting_sort(int arr[], int size, int max_val)
{
    // BUG FIX: the host-side counts buffer was allocated with `size`
    // entries but is filled by a memcpy of `max_val` entries below -- a
    // heap overflow whenever max_val > size. It must hold max_val entries.
    int* counting = (int*)malloc(sizeof(int) * max_val);
    if (counting == NULL) {
        fprintf(stderr, "counting_sort: host allocation failed\n");
        return;
    }
    int* counting_d;   // device histogram
    int* datarr_d;     // device copy of the input array
    cudaMalloc((void **)&counting_d, sizeof(int) * max_val);
    cudaMemset(counting_d, 0, max_val * sizeof(int));
    cudaMalloc((void**)&datarr_d, sizeof(int) * size);
    cudaMemcpy(datarr_d, arr, sizeof(int) * size, cudaMemcpyHostToDevice);
    // One thread per element, SIZ threads per block (ceil-div block count).
    int blocknum = (int)ceil((double)size / SIZ);
    countnum<<<blocknum, SIZ>>>(counting_d, datarr_d, size);
    cudaDeviceSynchronize();
    cudaMemcpy(counting, counting_d, sizeof(int) * max_val, cudaMemcpyDeviceToHost);
    // Expand the histogram back into arr in ascending value order.
    int index = 0;
    for (int j = 0; j < max_val; j++)
    {
        for (int q = 0; q < counting[j]; q++)
        {
            arr[index++] = j;
        }
    }
    free(counting);
    cudaFree(counting_d);
    cudaFree(datarr_d);
}
|
6,274 | // This is a CUDA program that does the following:
//
// 1. On the host, fill the A and B arrays with random numbers
// 2. On the host, print the initial values of the A and B arrays
// 3. Copy the A and B arrays from the host to the device
// 4. On the device, add the A and B vectors and store the result in C
// 5. Copy the C array from the device to the host
// 6. On the host, print the result
//
// Author: Aaron Weeden, Shodor, 2016
// Import library so we can call printf()
#include <stdio.h>
// Import library so we can call exit(), malloc(), free(), random(), etc.
#include <stdlib.h>
// Import library so we can call time()
#include <time.h>
// Define the number of numbers in each array
#define NUM_COUNT 10
// Define the number of bytes in each array
#define BYTE_COUNT ((NUM_COUNT) * sizeof(int))
// Declare functions that will be defined later
void TryMalloc(void * const err);
// Start the program
// Threads per block for the vector-add kernel launch
#define THREADS_PER_BLOCK 256

// Kernel: each thread adds one pair of elements; the guard handles the
// final partial block.
static __global__ void AddKernel(const int * const a, const int * const b,
                                 int * const c)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < NUM_COUNT)
  {
    c[i] = a[i] + b[i];
  }
}

// Start the program
int main()
{
  // Declare variables for the host and device arrays
  int * hostA;
  int * hostB;
  int * hostC;
  int * deviceA;
  int * deviceB;
  int * deviceC;
  // Allocate memory for the host arrays.
  // BUG FIX: the original left all allocations unimplemented and then
  // dereferenced hostA/hostB/hostC -- undefined behavior / crash.
  TryMalloc(hostA = (int *)malloc(BYTE_COUNT));
  TryMalloc(hostB = (int *)malloc(BYTE_COUNT));
  TryMalloc(hostC = (int *)malloc(BYTE_COUNT));
  // Allocate memory for the device arrays
  cudaMalloc((void **)&deviceA, BYTE_COUNT);
  cudaMalloc((void **)&deviceB, BYTE_COUNT);
  cudaMalloc((void **)&deviceC, BYTE_COUNT);
  // Initialize the random number generator
  srandom(time(NULL));
  // On the host, fill the A and B arrays with random numbers
  printf("Expected Result:\n");
  for (int i = 0; i < NUM_COUNT; i++)
  {
    hostA[i] = 100 * random() / RAND_MAX;
    hostB[i] = 100 * random() / RAND_MAX;
    printf("\thostC[%d] should be %d + %d\n", i, hostA[i], hostB[i]);
  }
  // Copy the A and B arrays from the host to the device
  cudaMemcpy(deviceA, hostA, BYTE_COUNT, cudaMemcpyHostToDevice);
  cudaMemcpy(deviceB, hostB, BYTE_COUNT, cudaMemcpyHostToDevice);
  // On the device, add the A and B vectors and store the result in C
  AddKernel<<<(NUM_COUNT + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
              THREADS_PER_BLOCK>>>(deviceA, deviceB, deviceC);
  // Copy the C array from the device to the host (this blocking copy also
  // synchronizes with the kernel on the default stream)
  cudaMemcpy(hostC, deviceC, BYTE_COUNT, cudaMemcpyDeviceToHost);
  // On the host, print the result
  printf("Result:\n");
  for (int i = 0; i < NUM_COUNT; i++)
  {
    printf("\thostC[%d] = %d\n", i, hostC[i]);
  }
  // De-allocate memory for the device arrays
  cudaFree(deviceA);
  cudaFree(deviceB);
  cudaFree(deviceC);
  // De-allocate memory for the host arrays
  free(hostA);
  free(hostB);
  free(hostC);
  return 0;
}
// Define a function to check whether a malloc() call was successful
// Abort the program with a message on stderr when a malloc() call
// returned NULL; a non-NULL pointer passes through silently.
void TryMalloc(void * const err)
{
  if (err != NULL)
  {
    return;
  }
  fprintf(stderr, "malloc error\n");
  exit(EXIT_FAILURE);
}
|
extern "C"
// Gradient of the squared loss w.r.t. the predictions: elementwise
// (predictions - targets), one thread per index.
__global__ void backwardSquaredLossKernel (int length, float *predictions, float *targets, float *result)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= length)
        return;
    result[i] = predictions[i] - targets[i];
}
6,276 | #include "includes.h"
// FARGO-style azimuthal transport: split the mean azimuthal motion of each
// radial ring into an integer number of cell shifts (Nshift) plus a small
// residual velocity, so the integer part can be applied as a cheap shift.
// Grid: j indexes the azimuthal sector (< nsec), i the radial ring (< nrad);
// one thread per (i, j) cell. PI, YES, NO come from includes.h.
// NOTE(review): Nshift[i] and NoSplitAdvection[i] are written by every j
// thread of ring i with the same value -- a benign but redundant race.
// NOTE(review): maxfrac is set to exactly 1.0 or 0.0, so the
// "maxfrac < 0.5" branch depends only on FastTransport; presumably a
// leftover of a fractional-splitting scheme -- confirm intent.
__global__ void ComputeConstantResidualKernel (double *VMed, double *invRmed, int *Nshift, int *NoSplitAdvection, int nsec, int nrad, double dt, double *Vtheta, double *VthetaRes, double *Rmed, int FastTransport)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;   // azimuthal index
    int i = threadIdx.y + blockDim.y*blockIdx.y;   // radial index
    double maxfrac, Ntilde, Nround, invdt, dpinvns;
    long nitemp;
    if (i<nrad && j<nsec){
        if (FastTransport)
            maxfrac = 1.0;
        else
            maxfrac = 0.0;
        invdt = 1.0/dt;
        dpinvns = 2.0*PI/(double)nsec;             // azimuthal cell width (radians)
        // Number of whole cells the ring's mean flow advects this timestep
        Ntilde = VMed[i]*invRmed[i]*dt*(double)nsec/2.0/PI;
        Nround = floor(Ntilde+0.5);                // round to nearest integer shift
        nitemp = (long)Nround;
        Nshift[i] = (long)nitemp;
        // Residual velocity left after removing the integer-cell shift
        Vtheta[i*nsec + j] = (Ntilde-Nround)*Rmed[i]*invdt*dpinvns;
        if (maxfrac < 0.5){
            NoSplitAdvection[i] = YES;
            VthetaRes[i*nsec + j] += Vtheta[i*nsec + j];
            Vtheta[i*nsec + j] = 0.0;
        }
        else{
            NoSplitAdvection[i] = NO;
        }
    }
}
6,277 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define DATA_SIZE 1048576
// Select the first CUDA device with compute capability >= 1.0 and make it
// current. Returns false (with a message on stderr) when no usable device
// exists.
bool InitCUDA()
{
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int chosen;
    for (chosen = 0; chosen < count; chosen++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, chosen) != cudaSuccess)
            continue;              // skip devices we cannot query
        if (prop.major >= 1)
            break;                 // found a usable device
    }
    if (chosen == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(chosen);
    return true;
}
// Fill `number` with `size` pseudo-random digits in [0, 9] using rand().
void GenerateNumbers(int *number, int size)
{
    int *end = number + size;
    for (int *p = number; p != end; ++p) {
        *p = rand() % 10;
    }
}
// Deliberately serial kernel: launched as <<<1, 1>>> (see main) so a single
// thread sums the squares of all DATA_SIZE inputs, timing itself with the
// device clock(). This is the naive GPU baseline, not a parallel reduction.
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
    int sum = 0;   // values are rand()%10, so max sum ~ 81 * DATA_SIZE, within int range
    int i;
    clock_t start = clock();
    for(i = 0; i < DATA_SIZE; i++) {
        sum += num[i] * num[i];
    }
    *result = sum;
    *time = clock() - start;   // elapsed device clock ticks
}
// Driver: time a single-thread GPU sum-of-squares against the CPU loop.
int main()
{
    if(!InitCUDA()) {
        return 0;
    }
    printf("CUDA initialized.\n");
    // BUG FIX (robustness): DATA_SIZE ints is 4 MB, too large to be safe
    // as an automatic (stack) array on platforms with small default
    // stacks. Static storage keeps the rest of the code unchanged.
    static int data[DATA_SIZE];
    GenerateNumbers(data, DATA_SIZE);
    int* gpudata, *result;
    clock_t* time;
    clock_t start_g, stop_g;
    start_g = clock();
    cudaMalloc((void**) &gpudata, sizeof(int) * DATA_SIZE);
    cudaMalloc((void**) &result, sizeof(int));
    cudaMalloc((void**) &time, sizeof(clock_t));
    cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
    // Deliberately serial launch: one block, one thread (see sumOfSquares).
    sumOfSquares<<<1, 1, 0>>>(gpudata, result, time);
    int sum;
    clock_t time_used;
    // These blocking copies also synchronize with the kernel launch.
    cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost);
    cudaFree(gpudata);
    cudaFree(result);
    cudaFree(time);
    stop_g = (clock() - start_g);
    // NOTE(review): time_used is in device clock ticks but is divided by the
    // host CLOCKS_PER_SEC -- the printed GPU time is only indicative.
    printf("sum (GPU): %d time: %f timeg: %f \n", sum, (double)time_used / CLOCKS_PER_SEC, (double) stop_g / CLOCKS_PER_SEC);
    clock_t start, stop;
    start = clock();
    sum = 0;
    for(int i = 0; i < DATA_SIZE; i++) {
        sum += data[i] * data[i];
    }
    stop = clock() - start;
    printf("sum (CPU): %d time: %f \n", sum, (double)stop / CLOCKS_PER_SEC);
    return 0;
}
|
6,278 | #include "includes.h"
// Vertical (column-wise) one-level periodized DWT pass.
// Each block loads a column strip of the input into shared memory,
// including Halo_steps blocks of halo rows above and below (wrapped
// periodically), then each thread computes one low-pass (d_cL) and one
// high-pass (d_cH) coefficient by convolving the 2x-downsampled column
// with the c_lpd / c_hpd filters (defined elsewhere, presumably in
// constant memory -- confirm).
// Shared layout: one contiguous span per threadIdx.x of length
// (2*Y_RESULT_STEPS + 2*Halo_steps) * Y_BLOCKDIM_Y floats.
// NOTE(review): the wrap index arithmetic ("(rows + 1) * cols",
// "(i * Y_BLOCKDIM_Y * (cols - 1))") is unusual -- verify against the
// host-side launch geometry before modifying.
__global__ void dwt_per_Y_O(float *d_ip, int rows, int cols, int cA_rows, int filt_len, int Halo_steps, float *d_cL, float *d_cH)
{
    extern __shared__ float s_Data[];
    //Offset to the upper halo edge
    const int baseX = blockIdx.x * Y_BLOCKDIM_X + threadIdx.x;
    const int baseY = ((blockIdx.y * 2 * Y_RESULT_STEPS) - Halo_steps) * Y_BLOCKDIM_Y + threadIdx.y;
    const int baseY1 = (blockIdx.y * Y_RESULT_STEPS) * Y_BLOCKDIM_Y + threadIdx.y;
    if (baseX < cols)
    {
        // Rebase the pointers so loop indexing below is relative.
        d_ip += baseY * cols + baseX;
        d_cL += baseY1 * cols + baseX;
        d_cH += baseY1 * cols + baseX;
        //Loading data to shared memory
        //Upper halo (rows above the tile, wrapped periodically)
        #pragma unroll
        for (int i = 0; i < Halo_steps; i++)
        {
            if (baseY + i * Y_BLOCKDIM_Y == -1) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(rows - 1) * cols];
            else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY + i * Y_BLOCKDIM_Y >= 0) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) + ((rows + 1)*cols)];
        }
        //Lower halo + Main data (rows of the tile and below, wrapped periodically)
        #pragma unroll
        for (int i = Halo_steps; i < Halo_steps + 2 * Y_RESULT_STEPS + Halo_steps; i++)
        {
            if (baseY + i * Y_BLOCKDIM_Y == rows) s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = d_ip[(i * Y_BLOCKDIM_Y * (cols - 1))];
            else s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + threadIdx.y + i * Y_BLOCKDIM_Y] = (baseY + i * Y_BLOCKDIM_Y < rows) ? d_ip[i * Y_BLOCKDIM_Y * cols] : d_ip[(i * Y_BLOCKDIM_Y * cols) - ((rows + 1)*cols)];
        }
        __syncthreads();   // all shared-memory loads must finish before filtering
        //Compute and store results
        #pragma unroll
        for (int i = 0; i < Y_RESULT_STEPS; i++)
        {
            if ((baseY1 + i * Y_BLOCKDIM_Y < cA_rows)) {
                int l2 = filt_len / 2;
                float sum_cL = 0, sum_cH = 0;
                // Convolve the downsampled (factor 2) column with both filters.
                for (int l = 0; l < filt_len; ++l)
                {
                    sum_cL += c_lpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l];
                    sum_cH += c_hpd[l] * s_Data[(threadIdx.x*(2 * Y_RESULT_STEPS + 2 * Halo_steps) *Y_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * Y_BLOCKDIM_Y + Halo_steps * Y_BLOCKDIM_Y + l2 - l];
                }
                d_cL[i * Y_BLOCKDIM_Y * cols] = sum_cL;
                d_cH[i * Y_BLOCKDIM_Y * cols] = sum_cH;
            }
        }
    }
}
6,279 | #include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + 1e-6 * (double)now.tv_usec;
}
// Busy-wait on the device for roughly t seconds, measured with clock64().
// clock_rate is the SM clock in kHz (cudaDeviceProp::clockRate), hence the
// * 1000.0f to convert elapsed ticks to seconds.
__device__ void sleep(float t, clock_t clock_rate) {
  const clock_t start = clock64();
  clock_t now;
  do {
    now = clock64();
  } while ((now - start) / (clock_rate * 1000.0f) < t);
}
// Kernel that simply busy-waits about one second; used by main() to probe
// how many blocks the GPU runs concurrently.
__global__ void mykernel(clock_t clock_rate) {
  sleep(1.0, clock_rate);
}
// Launch 1..max_num_blocks single-thread blocks of a ~1 s spin kernel and
// time each launch: the step pattern in the timings reveals how many
// blocks execute concurrently per wave on this GPU.
int main(int argc, char* argv[]) {
  // BUG FIX: argv[1] was dereferenced unconditionally; running the program
  // with no argument crashed before any CUDA call.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <max_num_blocks>\n", argv[0]);
    return 1;
  }
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  int mp = prop.multiProcessorCount;      // number of SMs
  clock_t clock_rate = prop.clockRate;    // SM clock in kHz
  int max_num_blocks = atoi(argv[1]);
  for (int num_blocks = 1; num_blocks <= max_num_blocks; num_blocks++) {
    dim3 block(1);
    dim3 grid(num_blocks); /* N blocks */
    double start = cpuSecond();
    mykernel<<<grid,block>>>(clock_rate);
    cudaDeviceSynchronize();              // wait so etime covers the kernel
    double etime = cpuSecond() - start;
    printf("blocks %10d\n",num_blocks);
    printf("SMs %10d\n",mp);
    printf("blocks/SM %10.2f\n",num_blocks/((double)mp));
    printf("time %10.2f\n",etime);
    printf("\n");
  }
  cudaDeviceReset();
  return 0;
}
|
6,280 | #include <cuda.h>
#include <iostream>
using namespace std;
// For each voxel (one thread per voxel; `offset` supports chunked launches
// when the grid cannot cover r*c*z threads at once), search the window of
// half-size scale_xy in-plane and scale_z axially around the voxel for the
// location of the maximum of im_vals, and record that argmax position.
// Input layout: im_vals[(k*r*c) + (i*c) + j] (z-major planes, row i, col j).
// Output layout: max_response_*[i*(c*z) + j*z + k] -- note this is a
// DIFFERENT (transposed) layout from the input.
// Because the comparison uses >=, ties resolve to the last-visited
// position (largest k, then j, then i within the window).
__global__ void InitialClusteringKernel_CUDA (float* im_vals, unsigned short* max_response_r, unsigned short* max_response_c, unsigned short* max_response_z , int r, int c, int z, int scale_xy, int scale_z, int offset)
{
    int iGID = blockIdx.x * blockDim.x + threadIdx.x + offset; //global index
    if (iGID >= r * c * z)
        return;
    // Decompose the flat index into plane k1, row i1, column j1.
    int rem = ((long)iGID) % (r*c);
    int k1 = ((int)iGID-rem) / (r*c);
    int j1 = ((long)rem) % c;
    int i1 = (rem-j1)/c;
    // Clamp the search window to the volume bounds.
    int min_r = (int) max((float)(0.0),(float)(i1-scale_xy));
    int min_c = (int) max((float)(0.0),(float)(j1-scale_xy));
    int min_z = (int) max((float)(0.0),(float)(k1-scale_z));
    int max_r = (int) min((float)(r-1),(float)(i1+scale_xy));
    int max_c = (int) min((float)(c-1),(float)(j1+scale_xy));
    int max_z = (int) min((float)(z-1),(float)(k1+scale_z));
    //If we are running on the GPU, it makes no sense to load this big array just to save computation since seed point should be the maximum anyways
    //if(local_max_vals[(k1*r*c)+(i1*c)+j1] == 0) //if current pixel is not a seed point
    //{
    // Seed the running maximum with the window's first element.
    float mx = im_vals[(min_z*r*c)+(min_r*c)+min_c];//A[r1][c1][z1];
    //Do not access arrays in a hot loop if you can just just do all the updates at the end, much faster to use registers
    int r_temp = min_r;
    int c_temp = min_c;
    int z_temp = min_z;
    float im_vals_temp;
    for(int i= min_r; i<= max_r; i++)
    {
        for(int j= min_c; j <= max_c; j++)
        {
            for(int k = min_z; k <= max_z; k++)
            {
                im_vals_temp = im_vals[(k*r*c)+(i*c)+j];
                if( im_vals_temp >= mx)
                {
                    mx = im_vals_temp;
                    r_temp = i;
                    c_temp = j;
                    z_temp = k;
                }
            }
        }
    }
    // Store the argmax coordinates (transposed output layout, see header).
    max_response_r[i1 * (c * z) + j1 * z + k1] = r_temp;
    max_response_c[i1 * (c * z) + j1 * z + k1] = c_temp;
    max_response_z[i1 * (c * z) + j1 * z + k1] = z_temp;
    //}
}
extern "C"
// Host wrapper: uploads the image volume, runs InitialClusteringKernel_CUDA
// over all voxels in chunks (the grid cannot cover r*c*z threads at once),
// and downloads the three argmax-coordinate maps.
// NOTE(review): local_max_vals is currently unused (see the commented-out
// seed-point test in the kernel); the parameter is kept for compatibility.
void initialClustering_CUDA (float* im_vals, unsigned short* local_max_vals, unsigned short* max_response_r, unsigned short* max_response_c, unsigned short* max_response_z , int r, int c, int z, int scale_xy, int scale_z)
{
    cout << "Entering initialClustering_CUDA" << endl;
    cudaError_t errorcode;
    float* dev_im_vals;
    //unsigned short* dev_local_max_vals;
    unsigned short* dev_max_response_r;
    unsigned short* dev_max_response_c;
    unsigned short* dev_max_response_z;
    size_t free_mem, total_mem;
    cudaMemGetInfo(&free_mem, &total_mem);
    cout << free_mem / (double)(1024 * 1024) << " " << total_mem / (double)(1024 * 1024) << endl;
    cout << "Allocating " << (sizeof(*im_vals) * r * c * z)/(double)(1024*1024) << " MB of memory on GPU for im_vals" << endl;
    //cout << "Allocating " << (sizeof(*local_max_vals) * r * c * z)/(double)(1024*1024) << " MB of memory on GPU for local_max_vals" << endl;
    cout << "Allocating " << (sizeof(*max_response_r) * r * c * z)/(double)(1024*1024) << " MB of memory on GPU for max_response_r" << endl;
    cout << "Allocating " << (sizeof(*max_response_c) * r * c * z)/(double)(1024*1024) << " MB of memory on GPU for max_response_c" << endl;
    cout << "Allocating " << (sizeof(*max_response_z) * r * c * z)/(double)(1024*1024) << " MB of memory on GPU for max_response_z" << endl;
    //Allocate memory on device
    errorcode = cudaMalloc((void**) &dev_im_vals, r * c * z * sizeof(*im_vals));
    //errorcode = cudaMalloc((void**) &dev_local_max_vals, r * c * z * sizeof(*local_max_vals));
    errorcode = cudaMalloc((void**) &dev_max_response_r, r * c * z * sizeof(*dev_max_response_r));
    errorcode = cudaMalloc((void**) &dev_max_response_c, r * c * z * sizeof(*dev_max_response_c));
    errorcode = cudaMalloc((void**) &dev_max_response_z, r * c * z * sizeof(*dev_max_response_z));
    //cout << errorcode << endl;
    //Copy host memory contents to device contents
    cudaMemcpy(dev_im_vals, im_vals, r * c * z * sizeof(*im_vals), cudaMemcpyHostToDevice);
    //cudaMemcpy(dev_local_max_vals, local_max_vals, r * c * z * sizeof(*local_max_vals), cudaMemcpyHostToDevice);
    //prefer 48 KB L1
    CUresult drivererrorcode = cuCtxSetCacheConfig(CU_FUNC_CACHE_PREFER_L1);
    //cout << drivererrorcode << endl;
    int device;
    cudaDeviceProp device_prop;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&device_prop, device);
    int threadsPerBlock = device_prop.maxThreadsDim[0];
    int numBlocks = device_prop.maxGridSize[0];
    //Run kernel repeatedly with offset since we cannot launch too many threads at once
    for (int k = 0; k < r * c * z; k+= numBlocks * threadsPerBlock) //Run kernel on groups of pixels at a time
    {
        InitialClusteringKernel_CUDA<<< numBlocks , threadsPerBlock >>>(dev_im_vals, dev_max_response_r, dev_max_response_c, dev_max_response_z , r, c, z, scale_xy, scale_z, k);
    }
    //Copy device memory contents back to host memory (blocking copies also
    //synchronize with the kernel launches above)
    cudaMemcpy(max_response_r, dev_max_response_r, r * c * z * sizeof(*max_response_r), cudaMemcpyDeviceToHost);
    cudaMemcpy(max_response_c, dev_max_response_c, r * c * z * sizeof(*max_response_c), cudaMemcpyDeviceToHost);
    cudaMemcpy(max_response_z, dev_max_response_z, r * c * z * sizeof(*max_response_z), cudaMemcpyDeviceToHost);
    cout << cudaGetErrorString(cudaGetLastError()) << endl;
    //Block until all previous commands are complete
    //(cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the replacement)
    cudaDeviceSynchronize();
    //BUG FIX: the original called cudaFree() on the HOST buffers
    //max_response_r/c/z, which both leaked the device allocations and
    //passed invalid (host) pointers to cudaFree. Free the device buffers.
    cudaFree(dev_im_vals);
    //cudaFree(dev_local_max_vals);
    cudaFree(dev_max_response_r);
    cudaFree(dev_max_response_c);
    cudaFree(dev_max_response_z);
    cout << "CUDA done" << endl;
}
6,281 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// Based on example code for random exponential
// Based on example code for random exponential.
// Draw one sample from Exp(lambda) by inverse-transform sampling of a
// uniform variate: -ln(U)/lambda. Uses logf so the computation stays in
// single precision on the device (the original `log` promoted to double).
__device__ float rexpo(curandState *state, float lambda){
	return -logf(curand_uniform(state)) / lambda;
}// rexpo
// Acceptance probability psi(z) for Robert-style one-sided truncated-normal
// rejection sampling with a shifted-exponential proposal:
//   lo <  alph : psi = exp(-(alph - z)^2 / 2)
//   lo >= alph : psi = exp(-(lo - alph)^2 / 2) * exp(-(alph - z)^2 / 2)
// Squares are computed by multiplication; the original used the
// double-precision pow(), promoting the whole expression to double.
__device__ float psi_function(float lo, float alph, float z){
	float az = alph - z;
	float common = expf(-0.5f * az * az);
	if(lo < alph){
		return common;
	}
	float la = lo - alph;
	return expf(-0.5f * la * la) * common;
}// psi_function
// Sample one truncated-normal value per thread: vals[idx] ~ N(mu[idx],
// sigma[idx]^2) restricted to (lo[idx], hi[idx]).
// Strategy: up to `maxtries` naive rejection draws from the untruncated
// normal; if all fail, fall back to Robert's exponential-rejection method
// (coded for one-sided truncation only, mirrored for the left tail).
// NOTE(review): mu_len/sigma_len/lo_len/hi_len are accepted but never used
// here — presumably intended for parameter recycling; confirm with caller.
// NOTE(review): the fallback while(!found) loop has no iteration cap (see
// the "ADD EMERGENCY EXIT" comment below) — a pathological psi could spin.
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int maxtries, const int seed_a,
const int seed_b, const int seed_c)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Declaration of other needed variables
int found = 0, right_truncation = 0, while_iter = 0;
float testval = 0, lowbound = 0, zval = 0, alpha = 0, psi = 0, u = 0;
// Only evaluate if thread is within the range of values we need
if(idx < n){
// Setup the RNG: per-thread seed so threads draw independent streams.
curandState rng;
curand_init(seed_a + idx * seed_b,seed_c,0,&rng);
// Sample: naive accept/reject against the (lo, hi) interval.
while((found == 0) && while_iter < maxtries){
testval = mu[idx] + sigma[idx] * curand_normal(&rng);
if(testval < hi[idx]){
if(testval > lo[idx]){
found = 1;
}
}
while_iter++;
}//while
if(found == 1){
vals[idx] = testval;
}else{
// Robert's approximation method - coded for right truncation only.
// Left-truncated case is handled by reflecting about mu (lowbound below)
// and negating the offset when writing the result.
if(lo[idx] > mu[idx]){
right_truncation = 1;
lowbound = lo[idx];
}else{
lowbound = -1 * hi[idx] + 2 * mu[idx];
}
// Compute optimum alpha (does not change and doesn't need to be in loop below)
//ADD EMERGENCY EXIT
alpha = (lowbound + sqrtf(pow(lowbound,2)+4))/2;
while(!found){
// Propose from a shifted exponential, accept with probability psi.
zval = lowbound + rexpo(&rng, alpha);
psi = psi_function(lowbound, alpha, zval);
u = curand_uniform(&rng);
if(u < psi){
found = 1;
if(right_truncation == 1){
vals[idx] = mu[idx] + sigma[idx] * zval;
}else{
vals[idx] = mu[idx] - sigma[idx] * zval;
}
}
}// while
}//ifelse
}// if(idx < n)
return;
} // rtruncnorm_kernel
} // END extern "C"
|
6,282 | #include <memory>
#include <string>
#include <stdexcept>
#include <vector>
#include <chrono>
#include <iostream>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Benchmark sweep configuration. Each *_lo/*_hi pair bounds a loop in
// main() that doubles the value every step.
constexpr size_t n_thread = 128;        // threads per block for every launch
constexpr size_t n_rep = 10;            // timing repetitions per configuration
constexpr size_t n_element_lo = 512;    // smallest array size swept
constexpr size_t n_element_hi = 131072; // largest array size swept
constexpr size_t n_epoch_lo = 32;       // epochs per repetition (fixed: lo == hi)
constexpr size_t n_epoch_hi = 32;
constexpr size_t n_serial_lo = 16;      // serial launches per epoch (fixed)
constexpr size_t n_serial_hi = 16;
constexpr size_t n_parallel_lo = 8;     // independent launches per epoch (swept)
constexpr size_t n_parallel_hi = 128;
using timer = std::chrono::high_resolution_clock;
// NOTE(review): the alias is named "nsecs" but refers to microseconds, and
// it appears unused in this chunk (timings use raw clock ticks) — confirm.
using nsecs = std::chrono::microseconds;
namespace kernels {
// No-op kernel; used to measure pure launch overhead.
__global__ void empty(size_t n) {}
// y[i] += alpha * x[i] for every i in [0, n).
__global__ void axpy(double *y, double* x, double alpha, size_t n) {
    auto idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;
    y[idx] += alpha*x[idx];
}
// Target function and its derivative for the Newton iteration below.
__device__ double f(double x) { return exp(cos(x)) - 2; }
__device__ double fp(double x) { return -sin(x) * exp(cos(x)); }
// Run n_iter Newton-Raphson steps on each element of x in place.
__global__ void newton(size_t n_iter, double *x, size_t n) {
    auto idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;
    double root = x[idx];
    for (size_t step = 0; step < n_iter; ++step) {
        root -= f(root)/fp(root);
    }
    x[idx] = root;
}
}
using result = std::vector<double>;
// Invoke a CUDA runtime call and throw std::runtime_error (carrying the API
// name and cudaGetErrorString text) on any non-success status.
#define cuda_api(f, ...) \
do { \
auto rc = f(__VA_ARGS__); \
if (rc != cudaSuccess) { \
throw std::runtime_error(std::string(#f) + ": " + cudaGetErrorString(rc)); \
} \
} while (0)
// Emit the CSV header row: the fixed columns, then one "t_<rep>" column per
// timing repetition. Column order matches the values print_result() emits.
void print_header() {
    const char* columns[] = {"kernel", "kind", "epoch", "serial", "parallel", "element"};
    for (const char* column : columns) {
        std::cout << column << ",";
    }
    for (auto rep = 0ul; rep < n_rep; ++rep) {
        std::cout << "t_" << rep << ",";
    }
    std::cout << std::endl;
    std::cout.flush();
}
// Emit one CSV row of benchmark timings.
// Fix: the original parameter names were swapped relative to what callers
// pass positionally — the 6th argument (the kernel tag, e.g. "axpy") was
// named `kind` and printed first under the "kernel" header column. The
// parameters are renamed so the code agrees with print_header()'s
// "kernel,kind" order; the positional behaviour and output are unchanged.
void print_result(result& res,
size_t epoch, size_t serial, size_t parallel, size_t element,
const std::string& kernel, const std::string& kind) {
std::cout << kernel << ","
<< kind << ","
<< epoch << ","
<< serial << ","
<< parallel << ","
<< element << ",";
for (const auto& r: res) std::cout << r << ",";
std::cout << std::endl;
std::cout.flush();
}
// Time two back-to-back device synchronisations, n_rep times, with no work
// queued — a baseline for host-side API latency. Returns the raw tick counts.
auto bench_null() {
    result res;
    for (size_t rep = 0; rep < n_rep; ++rep) {
        const auto start = timer::now();
        cuda_api(cudaDeviceSynchronize);
        cuda_api(cudaDeviceSynchronize);
        const auto stop = timer::now();
        res.push_back((stop - start).count());
    }
    return res;
}
// Benchmark plain kernel launches (no graphs): per repetition, launch
// n_epoch * (n_serial + n_parallel) kernels back to back on the default
// stream and time the whole batch between two device syncs.
// Note: with a single stream the "parallel" launches still execute in
// launch order; the split mirrors the graph benchmarks' topology.
template<typename K, typename... As>
auto bench_kernels(const std::string& tag, size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
result res;
// ceil-div: enough blocks of n_thread to cover n_element work items.
auto n_block = (n_element + n_thread - 1)/n_thread;
for (auto rep = 0; rep < n_rep; ++rep) {
auto t0 = timer::now();
cuda_api(cudaDeviceSynchronize);
for (auto epoch = 0ul; epoch < n_epoch; ++epoch) {
for (auto serial = 0ul; serial < n_serial; ++serial) {
kernel<<<n_block, n_thread>>>(as...);
}
for (auto parallel = 0ul; parallel < n_parallel; ++parallel) {
kernel<<<n_block, n_thread>>>(as...);
}
}
cuda_api(cudaDeviceSynchronize);
auto t1 = timer::now();
auto dt = (t1 - t0).count();
res.push_back(dt);
}
print_result(res, n_epoch, n_serial, n_parallel, n_element, tag, "kernels");
}
// Insert a no-op node (used as a join/fork point) into `graph`; returns its handle.
cudaGraphNode_t add_empty_node(cudaGraph_t& graph) {
    cudaGraphNode_t result = {0};
    cuda_api(cudaGraphAddEmptyNode, &result, graph, nullptr, 0);
    return result;
}
// Insert a kernel-launch node into `graph` with grid (n_block, n_thread) and
// no dependencies; returns the node handle.
// `args` holds one pointer per kernel parameter, each pointing at live
// storage for that argument's value (see set_args / make_graph).
// NOTE(review): the runtime is presumed to copy the parameter values at
// node-creation time, so `args` and its targets need only outlive this call
// — confirm against the cudaGraphAddKernelNode documentation.
template<class K>
cudaGraphNode_t add_kernel_node(cudaGraph_t& graph, size_t n_block, K kernel, const std::vector<void*> args) {
cudaGraphNode_t node = {0};
cudaKernelNodeParams params = {0};
params.func = (void*) kernel;
params.gridDim = n_block;
params.blockDim = n_thread;
params.sharedMemBytes = 0;
params.kernelParams = (void**) args.data();
params.extra = nullptr;
cuda_api(cudaGraphAddKernelNode, &node, graph, nullptr, 0, &params);
return node;
}
// Compile-time recursion filling `pointers[ix]` with the address of the
// ix-th element of the argument tuple, for ix in [0, N). The resulting
// pointer array is what cudaKernelNodeParams::kernelParams expects.
template<int N, int ix, typename... Ts>
constexpr void set_args(std::unique_ptr<std::tuple<Ts...>>& values, std::vector<void*>& pointers) {
if constexpr(ix >= N) {
return;  // all N slots filled
} else {
pointers[ix] = &std::get<ix>(*values);
set_args<N, ix + 1, Ts...>(values, pointers);
}
}
// Add a single edge from -> to in the graph's dependency DAG.
void add_dependencies(cudaGraph_t& graph, const cudaGraphNode_t& from, const cudaGraphNode_t& to) {
cuda_api(cudaGraphAddDependencies, graph, &from, &to, 1);
}
// Fan-out: add edges from_ -> to[i] for every node in `to`.
// (The API takes parallel from/to arrays, hence the replicated vector.)
void add_dependencies(cudaGraph_t& graph, const cudaGraphNode_t& from_, const std::vector<cudaGraphNode_t>& to) {
auto n = to.size();
std::vector<cudaGraphNode_t> from(n, from_);
cuda_api(cudaGraphAddDependencies, graph, from.data(), to.data(), n);
}
// Fan-in: add edges from[i] -> to_ for every node in `from`.
void add_dependencies(cudaGraph_t& graph, const std::vector<cudaGraphNode_t>& from, const cudaGraphNode_t& to_) {
auto n = from.size();
std::vector<cudaGraphNode_t> to(n, to_);
cuda_api(cudaGraphAddDependencies, graph, from.data(), to.data(), n);
}
// Build a CUDA graph of kernel launches: per epoch, a chain of n_serial
// nodes followed by a fan-out/fan-in diamond of n_parallel independent
// nodes, joined by empty nodes. Kernel arguments are captured by value into
// a heap tuple whose element addresses populate the kernelParams array.
// NOTE(review): `tmp` (the argument storage) is destroyed when this function
// returns — this relies on cudaGraphAddKernelNode copying parameter values
// at node-creation time; confirm against the runtime documentation.
// NOTE(review): cudaGraphDestroyNode is documented to REMOVE a node from its
// graph; destroying every created node before returning looks like it would
// empty the graph prior to instantiation — confirm intent vs. measured data.
template<typename K, typename... As>
auto make_graph(size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
std::vector<void*> args(sizeof...(As), nullptr);
auto tmp = std::make_unique<std::tuple<As...>>(as...);
set_args<sizeof...(As), 0, As...>(tmp, args);
cudaGraph_t graph = {0};
cuda_api(cudaGraphCreate, &graph, 0);
auto root = add_empty_node(graph);
auto last = root;
std::vector<cudaGraphNode_t> to_destroy;
// ceil-div: blocks of n_thread covering n_element work items.
auto n_block = (n_element + n_thread - 1)/n_thread;
for (auto epoch = 0ul; epoch < n_epoch; ++epoch) {
// Serial section: each node depends on the previous one.
for (auto serial = 0ul; serial < n_serial; ++serial) {
auto node = add_kernel_node(graph, n_block, kernel, args);
add_dependencies(graph, last, node);
last = node;
to_destroy.push_back(node);
}
// Parallel section: n_parallel independent nodes...
std::vector<cudaGraphNode_t> nodes;
for (auto parallel = 0ul; parallel < n_parallel; ++parallel) {
auto node = add_kernel_node(graph, n_block, kernel, args);
to_destroy.push_back(node);
nodes.push_back(std::move(node));
}
// ...fanned out from one empty node and fanned back into another.
last = add_empty_node(graph);
to_destroy.push_back(last);
add_dependencies(graph, last, nodes);
last = add_empty_node(graph);
to_destroy.push_back(last);
add_dependencies(graph, nodes, last);
}
for (auto& node: to_destroy) cuda_api(cudaGraphDestroyNode, node);
return graph;
}
// Benchmark a pre-instantiated CUDA graph: build + instantiate once, then
// per repetition time one graph launch between two device syncs.
template<typename K, typename... As>
auto bench_graph(const std::string& tag, size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
auto graph = make_graph(n_epoch, n_serial, n_parallel, n_element, kernel, as...);
cudaStream_t stream = {0};
cuda_api(cudaStreamCreate, &stream);
cudaGraphExec_t instance = {0};
cuda_api(cudaGraphInstantiate, &instance, graph, nullptr, nullptr, 0);
result res;
for (auto rep = 0; rep < n_rep; ++rep) {
auto t0 = timer::now();
cuda_api(cudaDeviceSynchronize);
cuda_api(cudaGraphLaunch, instance, stream);
cuda_api(cudaDeviceSynchronize);
auto t1 = timer::now();
auto dt = (t1 - t0).count();
res.push_back(dt);
}
print_result(res, n_epoch, n_serial, n_parallel, n_element, tag, "graphs");
// Tear down in reverse order of creation.
cuda_api(cudaGraphExecDestroy, instance);
cuda_api(cudaGraphDestroy, graph);
cuda_api(cudaStreamDestroy, stream);
}
// Like bench_graph, but before each timed launch the executable graph is
// refreshed from a freshly built (topologically identical) graph via
// cudaGraphExecUpdate — measuring launch cost when updates happen off the
// timed path (the update itself is outside the t0..t1 window).
template<typename K, typename... As>
auto bench_graph_update(const std::string& tag, size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
auto graph = make_graph(n_epoch, n_serial, n_parallel, n_element, kernel, as...);
cudaStream_t stream = {0};
cuda_api(cudaStreamCreate, &stream);
cudaGraphExec_t instance = {0};
cuda_api(cudaGraphInstantiate, &instance, graph, nullptr, nullptr, 0);
result res;
for (auto rep = 0; rep < n_rep; ++rep) {
// Rebuild an equivalent graph and push it into the executable instance.
auto update = make_graph(n_epoch, n_serial, n_parallel, n_element, kernel, as...);
cudaGraphNode_t error_node;
cudaGraphExecUpdateResult update_result;
cuda_api(cudaGraphExecUpdate, instance, update, &error_node, &update_result);
cuda_api(cudaGraphDestroy, update);
auto t0 = timer::now();
cuda_api(cudaDeviceSynchronize);
cuda_api(cudaGraphLaunch, instance, stream);
cuda_api(cudaDeviceSynchronize);
auto t1 = timer::now();
auto dt = (t1 - t0).count();
res.push_back(dt);
}
print_result(res, n_epoch, n_serial, n_parallel, n_element, tag, "graphs-update");
cuda_api(cudaGraphExecDestroy, instance);
cuda_api(cudaGraphDestroy, graph);
cuda_api(cudaStreamDestroy, stream);
}
// Like bench_graph, but the graph covers a SINGLE epoch and is launched
// n_epoch times per repetition — measuring repeated-launch overhead of a
// smaller graph instead of one launch of a big graph.
template<typename K, typename... As>
auto bench_graph_split(const std::string& tag, size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
auto graph = make_graph(1, n_serial, n_parallel, n_element, kernel, as...);
cudaStream_t stream = {0};
cuda_api(cudaStreamCreate, &stream);
cudaGraphExec_t instance = {0};
cuda_api(cudaGraphInstantiate, &instance, graph, nullptr, nullptr, 0);
result res;
for (auto rep = 0; rep < n_rep; ++rep) {
auto t0 = timer::now();
cuda_api(cudaDeviceSynchronize);
for (auto epoch = 0ul; epoch < n_epoch; ++epoch) {
cuda_api(cudaGraphLaunch, instance, stream);
}
cuda_api(cudaDeviceSynchronize);
auto t1 = timer::now();
auto dt = (t1 - t0).count();
res.push_back(dt);
}
print_result(res, n_epoch, n_serial, n_parallel, n_element, tag, "graphs-split");
cuda_api(cudaGraphExecDestroy, instance);
cuda_api(cudaGraphDestroy, graph);
cuda_api(cudaStreamDestroy, stream);
}
// Combination of the split and update variants: a single-epoch graph is
// rebuilt + cudaGraphExecUpdate'd and then launched, n_epoch times per
// repetition, with the update cost INSIDE the timed window.
template<typename K, typename... As>
auto bench_graph_split_update(const std::string& tag, size_t n_epoch, size_t n_serial, size_t n_parallel, size_t n_element, K kernel, As... as) {
auto graph = make_graph(1, n_serial, n_parallel, n_element, kernel, as...);
cudaStream_t stream = {0};
cuda_api(cudaStreamCreate, &stream);
cudaGraphExec_t instance = {0};
cuda_api(cudaGraphInstantiate, &instance, graph, nullptr, nullptr, 0);
result res;
for (auto rep = 0; rep < n_rep; ++rep) {
auto t0 = timer::now();
cuda_api(cudaDeviceSynchronize);
for (auto epoch = 0ul; epoch < n_epoch; ++epoch) {
// Per-epoch rebuild + in-place executable update, then launch.
auto update = make_graph(1, n_serial, n_parallel, n_element, kernel, as...);
cudaGraphNode_t error_node;
cudaGraphExecUpdateResult update_result;
cuda_api(cudaGraphExecUpdate, instance, update, &error_node, &update_result);
cuda_api(cudaGraphDestroy, update);
cuda_api(cudaGraphLaunch, instance, stream);
}
cuda_api(cudaDeviceSynchronize);
auto t1 = timer::now();
auto dt = (t1 - t0).count();
res.push_back(dt);
}
print_result(res, n_epoch, n_serial, n_parallel, n_element, tag, "graphs-split-update");
cuda_api(cudaGraphExecDestroy, instance);
cuda_api(cudaGraphDestroy, graph);
cuda_api(cudaStreamDestroy, stream);
}
// Sweep all benchmark configurations and emit one CSV row per run.
int main() {
    // Device buffers reused by every benchmark, sized for the largest case.
    double* x;
    double* y;
    // Fix: alpha was read by the axpy benchmarks without ever being
    // initialised (undefined behaviour). Any fixed value works; timings do
    // not depend on it.
    double alpha = 1.0;
    // Fix: kernels::newton takes size_t n_iter. The literal `5` (int) was
    // forwarded into the graph paths, where make_graph stores the 4-byte int
    // and cudaGraphAddKernelNode hands its address to a kernel that reads an
    // 8-byte size_t — garbage iteration counts. Use a size_t value.
    const size_t newton_iters = 5;
    cuda_api(cudaMalloc, &x, n_element_hi*sizeof(double));
    cuda_api(cudaMalloc, &y, n_element_hi*sizeof(double));
    print_header();
    for (auto n_epoch = n_epoch_lo; n_epoch <= n_epoch_hi; n_epoch *= 2) {
        for (auto n_serial = n_serial_lo; n_serial <= n_serial_hi; n_serial *= 2) {
            for (auto n_parallel = n_parallel_lo; n_parallel <= n_parallel_hi; n_parallel *= 2) {
                for (auto n_element = n_element_lo; n_element <= n_element_hi; n_element *= 2) {
                    std::cerr << "Kernels"
                              << " epochs=" << n_epoch
                              << " serial=" << n_serial
                              << " parallel=" << n_parallel
                              << " element=" << n_element
                              << std::endl;
                    bench_kernels("empty", n_epoch, n_serial, n_parallel, n_element, kernels::empty, n_element);
                    bench_kernels("axpy", n_epoch, n_serial, n_parallel, n_element, kernels::axpy, y, x, alpha, n_element);
                    bench_kernels("newton 5", n_epoch, n_serial, n_parallel, n_element, kernels::newton, newton_iters, x, n_element);
                    std::cerr << "Graphs"
                              << " epochs=" << n_epoch
                              << " serial=" << n_serial
                              << " parallel=" << n_parallel
                              << " element=" << n_element
                              << std::endl;
                    bench_graph("empty", n_epoch, n_serial, n_parallel, n_element, kernels::empty, n_element);
                    bench_graph("axpy", n_epoch, n_serial, n_parallel, n_element, kernels::axpy, y, x, alpha, n_element);
                    bench_graph("newton 5", n_epoch, n_serial, n_parallel, n_element, kernels::newton, newton_iters, x, n_element);
                    std::cerr << "Graphs w/ update"
                              << " epochs=" << n_epoch
                              << " serial=" << n_serial
                              << " parallel=" << n_parallel
                              << " element=" << n_element
                              << std::endl;
                    bench_graph_update("empty", n_epoch, n_serial, n_parallel, n_element, kernels::empty, n_element);
                    bench_graph_update("axpy", n_epoch, n_serial, n_parallel, n_element, kernels::axpy, y, x, alpha, n_element);
                    bench_graph_update("newton 5", n_epoch, n_serial, n_parallel, n_element, kernels::newton, newton_iters, x, n_element);
                    std::cerr << "Graphs w/ split epoch"
                              << " epochs=" << n_epoch
                              << " serial=" << n_serial
                              << " parallel=" << n_parallel
                              << " element=" << n_element
                              << std::endl;
                    bench_graph_split("empty", n_epoch, n_serial, n_parallel, n_element, kernels::empty, n_element);
                    bench_graph_split("axpy", n_epoch, n_serial, n_parallel, n_element, kernels::axpy, y, x, alpha, n_element);
                    bench_graph_split("newton 5", n_epoch, n_serial, n_parallel, n_element, kernels::newton, newton_iters, x, n_element);
                    std::cerr << "Graphs w/ split epoch + update"
                              << " epochs=" << n_epoch
                              << " serial=" << n_serial
                              << " parallel=" << n_parallel
                              << " element=" << n_element
                              << std::endl;
                    bench_graph_split_update("empty", n_epoch, n_serial, n_parallel, n_element, kernels::empty, n_element);
                    bench_graph_split_update("axpy", n_epoch, n_serial, n_parallel, n_element, kernels::axpy, y, x, alpha, n_element);
                    bench_graph_split_update("newton 5", n_epoch, n_serial, n_parallel, n_element, kernels::newton, newton_iters, x, n_element);
                }
            }
        }
    }
    // Fix: release the device buffers (previously leaked).
    cuda_api(cudaFree, x);
    cuda_api(cudaFree, y);
}
|
6,283 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
// NOTE(review): TRUE/FALSE are inverted relative to the usual C convention
// (TRUE is 0 here). All uses in this file are self-consistent with these
// definitions, but confirm before reusing elsewhere.
#define TRUE 0
#define FALSE 1
// A directed, weighted edge of the input graph.
typedef struct {
int src;   // source vertex index
int dst;   // destination vertex index
int cost;  // edge weight
} Edge;
// One Bellman-Ford relaxation pass: each thread relaxes exactly one edge.
// Must be launched with one thread per edge (no edge-count parameter, so
// out-of-range threads are not guarded — the caller launches <<<1, m>>>).
// Sets *change to TRUE (defined as 0 above) whenever a distance improves.
// Fixes vs. the original:
//  - skip edges whose source is still unreached (INT_MAX): adding a cost to
//    INT_MAX is signed overflow (UB) and yielded bogus negative candidates;
//  - removed the leftover debug printf loop that every thread executed.
__global__ void bellman_ford_kernel(int *dis_arr, Edge *edges, int *change) {
    int my_id = blockIdx.x*blockDim.x + threadIdx.x;
    Edge my_edge = edges[my_id];
    // Unreached source: nothing to relax, and INT_MAX + cost would overflow.
    if (dis_arr[my_edge.src] == INT_MAX)
        return;
    int curr_dis = dis_arr[my_edge.dst];
    int cand_dis = dis_arr[my_edge.src] + my_edge.cost;
    if (cand_dis < curr_dis) {
        *change = TRUE;
        // Unsynchronised update: concurrent writers race, but any lost
        // improvement is re-attempted on the next pass.
        dis_arr[my_edge.dst] = cand_dis;
    }
}
// Single-source shortest paths on a fixed 5-vertex / 5-edge graph, iterating
// the relaxation kernel until a full pass makes no improvement.
int main() {
    int n = 5; // num vertices
    int m = 5; // num edges
    int i;
    int *dis_arr = (int*)malloc(sizeof(int)*n);
    Edge *edges = (Edge*)malloc(sizeof(Edge)*m);
    // init edges
    Edge e1 = {0,1, 5};
    Edge e2 = {0,3, 1};
    Edge e3 = {1,2, 3};
    Edge e4 = {3,4, 4};
    Edge e5 = {2,4, 8};
    edges[0] = e1;
    edges[1] = e2;
    edges[2] = e3;
    edges[3] = e4;
    edges[4] = e5;
    // Distances start at "infinity"; the source (vertex 0) starts at 0.
    for (i = 0; i < n; i++)
        dis_arr[i] = INT_MAX;
    dis_arr[0] = 0;
    int *gpu_dis_arr;
    Edge *gpu_edges;
    cudaMalloc((void**)&gpu_dis_arr, sizeof(int)*n);
    cudaMalloc((void**)&gpu_edges, sizeof(Edge)*m);
    cudaMemcpy(gpu_dis_arr, dis_arr, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_edges, edges, sizeof(Edge)*m, cudaMemcpyHostToDevice);
    int *change = (int*)malloc(sizeof(int));
    int *gpu_change;
    cudaMalloc((void**)&gpu_change, sizeof(int));
    // Fixes vs. the original:
    //  - `while(change == TRUE)` compared the POINTER to 0 (TRUE is 0 in
    //    this file), so the loop never ran; the flag must be dereferenced;
    //  - the flag must be reset to FALSE before each pass (the reset was
    //    commented out), otherwise the loop could never terminate.
    *change = TRUE;
    while (*change == TRUE) {
        *change = FALSE;
        cudaMemcpy(gpu_change, change, sizeof(int)*1, cudaMemcpyHostToDevice);
        bellman_ford_kernel<<< 1 , m >>>(gpu_dis_arr, gpu_edges, gpu_change);
        cudaMemcpy(change, gpu_change, sizeof(int)*1, cudaMemcpyDeviceToHost);
    }
    cudaMemcpy(dis_arr, gpu_dis_arr, sizeof(int)*n, cudaMemcpyDeviceToHost);
    // print dis-arr
    printf("Distance array\n");
    for (i = 0; i < n; i++)
        printf("%d ", dis_arr[i]);
    // Release device and host memory (previously leaked).
    cudaFree(gpu_dis_arr);
    cudaFree(gpu_edges);
    cudaFree(gpu_change);
    free(dis_arr);
    free(edges);
    free(change);
    return 0;
}
|
6,284 | /*MIT License
Copyright (c) 2019 Xavier Martinez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#define NBTHREADS 256       // default threads per block for the kernels below
// Sentinel marking "no atom here / cell not to be processed".
// NOTE(review): value is unparenthesised — fine for the ==/!=/< uses in this
// file, but beware if reused in arithmetic expressions.
#define EMPTYCELL INT_MAX-1
#define PROBERADIUS (1.4f)  // solvent probe radius (presumably water) — confirm
#define EPSILON (0.001f)    // float comparison tolerance
#include <thrust/binary_search.h>
#include "operators.cu"
#include "MarchingCubes.cu"
// Comparator/functor objects used with thrust algorithms (sort, reduce,
// binary search, copy_if) throughout this file.
// Orders int2 key/value pairs by key (.x) only.
struct compare_int2 {
__host__ __device__ __forceinline__ bool operator()(int2 a, int2 b) {return a.x < b.x;}
};
struct compare_int {
__host__ __device__ __forceinline__ bool operator()(int a, int b) {return a < b;}
};
// Three-clause float3 ordering; effectively lexicographic on (x, y, z) —
// when a.x < b.x the third clause fires, and on ties the earlier clauses
// reduce to comparing the next component.
struct compare_float3 {
__device__ __host__ bool operator()(float3 a, float3 b) {
if (a.x <= b.x && a.y <= b.y && a.z < b.z) return true;
else if (a.x <= b.x && a.y < b.y) return true;
else if (a.x < b.x) return true;
else return false;
}
};
// Same ordering as compare_float3, const-qualified for use as a sort key.
struct sort_float3 {
__host__ __device__
bool operator()(const float3 &a, const float3 &b) const {
if (a.x <= b.x && a.y <= b.y && a.z < b.z) return true;
else if (a.x <= b.x && a.y < b.y) return true;
else if (a.x < b.x) return true;
else return false;
}
};
// Integer analogue of the float3 ordering above.
struct compare_int3 {
__host__ __device__ bool operator()(int3 a, int3 b) {
if (a.x <= b.x && a.y <= b.y && a.z < b.z) return true;
else if (a.x <= b.x && a.y < b.y) return true;
else if (a.x < b.x) return true;
else return false;
}
};
// Component-wise float3 equality within EPSILON tolerance.
struct samefloat3 {
__host__ __device__ bool operator()(float3 a, float3 b) {
if (abs(a.x - b.x) > EPSILON)
return false;
if (abs(a.y - b.y) > EPSILON)
return false;
if (abs(a.z - b.z) > EPSILON)
return false;
return true;
}
};
// Exact component-wise int3 equality.
struct sameint3 {
__host__ __device__ bool operator()(int3 a, int3 b) {
if (a.x != b.x)
return false;
if (a.y != b.y)
return false;
if (a.z != b.z)
return false;
return true;
}
};
// Templated variant of the (x, y, z) ordering for any .x/.y/.z vector type.
template<typename T>
struct lessf3 : public thrust::binary_function<T, T, bool>
{
__host__ __device__ bool operator()(const T &a, const T &b) const {
if (a.x <= b.x && a.y <= b.y && a.z < b.z) return true;
else if (a.x <= b.x && a.y < b.y) return true;
else if (a.x < b.x) return true;
else return false;
}
};
// Component-wise uint2 addition (e.g. for thrust::reduce of pair counters).
struct add_uint2 {
__device__
uint2 operator()(const uint2& a, const uint2& b) const {
uint2 r;
r.x = a.x + b.x;
r.y = a.y + b.y;
return r;
}
};
// Predicate: true for cell ids that hold real work (not the EMPTYCELL sentinel).
struct is_notempty
{
__host__ __device__
bool operator()(const int &x) const
{
return x != EMPTYCELL;
}
};
// Fill N floats with val; one element per thread, surplus threads exit.
// (cudaMemset is byte-wise, so a kernel is needed for arbitrary values.)
__global__ void memsetCudaFloat(float *data, float val, int N){
    const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        data[i] = val;
    }
}
// Fill N ints with val; one element per thread.
__global__ void memsetCudaInt(int *data, int val, int N) {
    const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        data[i] = val;
    }
}
// Fill N int2 values with val; one element per thread.
__global__ void memsetCudaInt2(int2 *data, int2 val, int N) {
    const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        data[i] = val;
    }
}
// Fill N uint2 values with val; one element per thread.
__global__ void memsetCudaUInt2(uint2 *data, uint2 val, int N) {
    const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        data[i] = val;
    }
}
// Map a world-space position to integer grid coordinates (truncation, no
// rounding and no clamping — callers must clamp out-of-grid results).
__host__ __device__ int3 spaceToGrid(float3 pos3D, float3 originGrid, float dx) {
float3 tmp = ((pos3D - originGrid) / dx);
return make_int3(tmp.x, tmp.y, tmp.z);
}
// Inverse of spaceToGrid: grid cell -> world-space position of its corner.
__host__ __device__ float3 gridToSpace(int3 cellPos, float3 originGrid, float dx) {
return (originGrid + (make_float3(cellPos.x, cellPos.y, cellPos.z) * dx) );
//return originGrid + (convert_float3(cellPos) * dx) + (dx / 2.0f);
}
// Flatten (x, y, z) to a linear index, x-major: index = (y*z)*x + z*y + z.
// Must stay consistent with unflatten1DTo3D below.
__host__ __device__ int flatten3DTo1D(int3 id3d, int3 gridDim) {
return (gridDim.y * gridDim.z * id3d.x) + (gridDim.z * id3d.y) + id3d.z;
}
// __host__ __device__ int flatten3DTo1D(int3 id3d, int3 gridDim) {
// // return id3d.x + gridDim.x * (id3d.y + gridDim.z * id3d.z);
// return id3d.x + id3d.y * gridDim.x + id3d.z * gridDim.x * gridDim.y;
// }
// Inverse of flatten3DTo1D (same x-major layout).
__host__ __device__ int3 unflatten1DTo3D(int index, int3 gridDim) {
int3 res;
// res.x = index % gridDim.x;
// res.y = (index / gridDim.x) % gridDim.y;
// res.z = index / (gridDim.x * gridDim.y);
res.x = index / (gridDim.y * gridDim.z); //Note the integer division . This is x
res.y = (index - res.x * gridDim.y * gridDim.z) / gridDim.z; //This is y
res.z = index - res.x * gridDim.y * gridDim.z - res.y * gridDim.z; //This is z
return res;
}
// Euclidean distance between p1 and p2.
// Uses sqrtf so the computation stays in single precision; the original
// called the double-precision sqrt, forcing a promotion on the device.
inline __host__ __device__ float fast_distance(float3 p1, float3 p2) {
	float x = (p1.x - p2.x) * (p1.x - p2.x);
	float y = (p1.y - p2.y) * (p1.y - p2.y);
	float z = (p1.z - p2.z) * (p1.z - p2.z);
	return sqrtf(x + y + z);
}
// Squared Euclidean distance between p1 and p2 (no square root).
inline __host__ __device__ float sqr_distance(float3 p1, float3 p2) {
	const float dx = p1.x - p2.x;
	const float dy = p1.y - p2.y;
	const float dz = p1.z - p2.z;
	return dx * dx + dy * dy + dz * dz;
}
// Component-wise clamp of f into [a, b].
inline __host__ __device__ float3 clamp(float3 f, float a, float b)
{
	float3 r;
	r.x = max(a, min(f.x, b));
	r.y = max(a, min(f.y, b));
	r.z = max(a, min(f.z, b));
	return r;
}
// Integer component-wise clamp of f into [a, b].
inline __host__ __device__ int3 clamp(int3 f, int a, int b)
{
	int3 r;
	r.x = max(a, min(f.x, b));
	r.y = max(a, min(f.y, b));
	r.z = max(a, min(f.z, b));
	return r;
}
// Scalar clamp of f into [a, b].
inline __host__ __device__ int clamp(int f, int a, int b)
{
	return max(a, min(f, b));
}
// inline __device__ int lessThan(float x, float y) {
// return max(sign(y - x), 0);
// }
// calculate cell address as the hash value for each atom
// For each atom, compute its neighbour-grid cell hash and record the
// (hash, original index) pair for the subsequent key sort. Indices in
// [natoms, natomsnextPow2) are padded with INT_MAX keys so they sort to the
// end (the bitonic sort requires a power-of-two element count).
__global__ void hashAtoms(unsigned int natoms,
float4* xyzr,
int3 gridDim,
float4 originGridDDx,
int2 * atomHashIndex,
unsigned natomsnextPow2) {
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= natomsnextPow2)
return;
if (index >= natoms) {
atomHashIndex[index].x = INT_MAX;
atomHashIndex[index].y = INT_MAX;
return;
}
float4 atom = xyzr[index];// read atom coordinate and radius
float3 pos = make_float3(atom.x, atom.y, atom.z);
// compute cell index, clamped to fall within grid bounds
// NOTE(review): spaceToGrid does not actually clamp — atoms outside the
// grid would produce out-of-range hashes; presumably the grid is built to
// enclose all atoms. Confirm with the host-side setup.
float3 originGrid = make_float3(originGridDDx.x, originGridDDx.y, originGridDDx.z);
float dx = originGridDDx.w;
int3 cell = spaceToGrid(pos, originGrid, dx);
int hash = flatten3DTo1D(cell, gridDim);
atomHashIndex[index].x = hash;// atoms hashed to cell address
atomHashIndex[index].y = index; // original atom index
}
//https://github.com/FROL256/opencl_bitonic_sort_by_key/blob/master/bitonic_sort_gpu.h
// Key/value accessors for the int2 (hash, atom index) pairs sorted below.
inline __device__ int getKey(int2 v) { return v.x; }
inline __device__ int getVal(int2 v) { return v.y; }
// Strictly-less-by-key comparison used by both bitonic kernels.
inline __device__ bool compare(int2 a, int2 b) { return getKey(a) < getKey(b); }
// One global-memory pass of a bitonic sort-by-key: each thread compares and
// conditionally swaps one (left, right) pair at distance 2^passOfStage.
// The array length must be a power of two; each thread handles one pair,
// so launch with length/2 threads. `a_invertModeOn` toggles the direction
// flip that merges ascending/descending runs between stages.
__global__ void bitonic_pass_kernel(int2* theArray, int stage, int passOfStage, int a_invertModeOn)
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
const int r = 1 << (passOfStage);
const int lmask = r - 1;
// Map thread j to the left element of its compare pair.
const int left = ((j >> passOfStage) << (passOfStage + 1)) + (j & lmask);
const int right = left + r;
const int2 a = theArray[left];
const int2 b = theArray[right];
const bool cmpRes = compare(a, b);
const int2 minElem = cmpRes ? a : b;
const int2 maxElem = cmpRes ? b : a;
// Odd subsequences are written in reversed order when invert mode is on,
// producing the alternating directions a bitonic merge needs.
const int oddEven = j >> stage;
const bool isSwap = (oddEven & 1) & a_invertModeOn;
const int minId = isSwap ? right : left;
const int maxId = isSwap ? left : right;
theArray[minId] = minElem;
theArray[maxId] = maxElem;
}
// Shared-memory bitonic sort finishing passes for 512-element tiles:
// once the compare distance fits inside one block's tile, all remaining
// passes (passOfStageBegin down to 0) run against shared memory, avoiding
// one global round-trip per pass. Expects blockDim.x == 256 (each thread
// loads/stores two elements of its 512-wide tile).
__global__ void bitonic_512( int2* theArray, int stage, int passOfStageBegin, int a_invertModeOn)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int lid = threadIdx.x;
int blockId = (tid / 256);
__shared__ int2 s_array[512];
// Stage the tile into shared memory (two elements per thread).
s_array[lid + 0] = theArray[blockId * 512 + lid + 0];
s_array[lid + 256] = theArray[blockId * 512 + lid + 256];
__syncthreads();
for (int passOfStage = passOfStageBegin; passOfStage >= 0; passOfStage--)
{
// Same pair-mapping and conditional swap as bitonic_pass_kernel,
// but indices are local to the shared tile.
const int j = lid;
const int r = 1 << (passOfStage);
const int lmask = r - 1;
const int left = ((j >> passOfStage) << (passOfStage + 1)) + (j & lmask);
const int right = left + r;
const int2 a = s_array[left];
const int2 b = s_array[right];
const bool cmpRes = compare(a, b);
const int2 minElem = cmpRes ? a : b;
const int2 maxElem = cmpRes ? b : a;
const int oddEven = tid >> stage; // (j >> stage)
const bool isSwap = (oddEven & 1) & a_invertModeOn;
const int minId = isSwap ? right : left;
const int maxId = isSwap ? left : right;
s_array[minId] = minElem;
s_array[maxId] = maxElem;
__syncthreads();
}
// Write the sorted tile back to global memory.
theArray[blockId * 512 + lid + 0] = s_array[lid + 0];
theArray[blockId * 512 + lid + 256] = s_array[lid + 256];
}
// Post-sort pass over the (hash, index) pairs: detect cell boundaries to
// fill cellStartEnd[hash] = {first, one-past-last} for every occupied cell,
// and gather atoms into sorted_xyzr in hash order.
// Assumes atomHashIndex is already sorted by hash.
__global__ void sortCell(unsigned int natoms, float4 *xyzr, int2 *atomHashIndex,
float4 *sorted_xyzr,
int2 *cellStartEnd) {
int hash;
int id;
// __local int local_hash[NBTHREADS + 1];
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= natoms)
return;
hash = atomHashIndex[index].x;
id = atomHashIndex[index].y;
// Hash of the previous sorted entry; a mismatch marks a cell boundary.
int hashm1;
if (index != 0)
hashm1 = atomHashIndex[index - 1].x;
else
hashm1 = hash;
if (index == 0 || hash != hashm1) {
cellStartEnd[hash].x = index; // set start
if (index > 0)
cellStartEnd[hashm1].y = index; // set end
}
if (index == natoms - 1) {
cellStartEnd[hash].y = index + 1; // set end
}
// Reorder atoms according to sorted indices
sorted_xyzr[index] = xyzr[id];
}
// Classify the SES grid cell at spacePosSES against the atoms found in the
// 3x3x3 neighbourhood of id3DNeigh in the neighbour grid:
//   1) outside every atom           => PROBERADIUS
//   2) inside an atom               => -dx
//   3) at the border (probe range)  => 0.0
// Fix: the y and z loop clamps used gridDimNeighbor.x instead of .y/.z —
// for non-cubic grids this skipped or duplicated neighbour cells.
inline __host__ __device__ float computeInOrOut(int3 id3DNeigh, int2 *cellStartEnd, float4 *sorted_xyzr, float3 spacePosSES, float dx, int3 gridDimNeighbor) {
	int3 curgridId;
	float result = PROBERADIUS;
	bool nearProbe = false;
	for (int x = -1; x <= 1; x++) {
		curgridId.x = clamp(id3DNeigh.x + x, 0, gridDimNeighbor.x - 1);
		for (int y = -1; y <= 1; y++) {
			// BUGFIX: clamp against the y dimension (was gridDimNeighbor.x).
			curgridId.y = clamp(id3DNeigh.y + y, 0, gridDimNeighbor.y - 1);
			for (int z = -1; z <= 1; z++) {
				// BUGFIX: clamp against the z dimension (was gridDimNeighbor.x).
				curgridId.z = clamp(id3DNeigh.z + z, 0, gridDimNeighbor.z - 1);
				int neighcellhash = flatten3DTo1D(curgridId, gridDimNeighbor);
				int idStart = cellStartEnd[neighcellhash].x;
				int idStop = cellStartEnd[neighcellhash].y;
				// Occupied cells hold real indices; empty ones the EMPTYCELL sentinel.
				if (idStart < EMPTYCELL) {
					for (int id = idStart; id < idStop; id++) {
						float4 xyzr = sorted_xyzr[id];
						float rad = xyzr.w;
						float3 pos = make_float3(xyzr.x, xyzr.y, xyzr.z);
						float d = fast_distance(pos, spacePosSES);
						// Deep inside this atom: classification is final.
						if (d < rad - dx)
							return -dx;
						// Within probe reach of an atom surface: border cell.
						if (d < (PROBERADIUS + rad) )
							nearProbe = true;
					}
				}
			}
		}
	}
	if (nearProbe)
		result = 0.0f;
	return result;
}
// For each SES grid cell of the current slice (3D launch), classify it as
// outside / inside / border via computeInOrOut, writing the signed value to
// gridValues and, for border cells only, the cell's slice-local hash to
// checkFill (EMPTYCELL otherwise) so a later compaction can gather them.
// offsetGrid maps slice-local coordinates into the full-size SES grid.
__global__ void probeIntersection(int * checkFill,
int2 * atomHashIndex,
int3 gridDimNeighbor,
float4 originGridNeighborDDx,
int3 gridDimSES,
int3 sliceGridDimSES,
// int3 smallsliceGridDimSES,
float4 originGridSESDx,
int2 *cellStartEnd,
float4 * sorted_xyzr,
float *gridValues,
int3 offsetGrid,
unsigned int N,
int sliceNb) {
// Get global position in X direction
unsigned int i = (threadIdx.x + blockIdx.x * blockDim.x);
// Get global position in Y direction
unsigned int j = (threadIdx.y + blockIdx.y * blockDim.y);
// Get global position in Z direction
unsigned int k = (threadIdx.z + blockIdx.z * blockDim.z);
int3 ijk = make_int3(i, j, k);
// Bounds checks against the slice dimensions...
if (i >= sliceGridDimSES.x - 1)
return;
if (j >= sliceGridDimSES.y - 1)
return;
if (k >= sliceGridDimSES.z - 1)
return;
int hash = flatten3DTo1D(ijk, sliceGridDimSES);
// if(hash >= sliceNb)
// return;
// ...and against the full grid after applying the slice offset.
int3 ijkOffset = make_int3(i + offsetGrid.x, j + offsetGrid.y, k + offsetGrid.z);
int hashOffset = flatten3DTo1D(ijkOffset, gridDimSES);
if (ijkOffset.x >= gridDimSES.x - 1)
return;
if (ijkOffset.y >= gridDimSES.y - 1)
return;
if (ijkOffset.z >= gridDimSES.z - 1)
return;
float3 originGridNeighbor = make_float3(originGridNeighborDDx.x, originGridNeighborDDx.y, originGridNeighborDDx.z);
float dxNeighbor = originGridNeighborDDx.w;
float dxSES = originGridSESDx.w;
// NOTE(review): the SES cell position is computed from the NEIGHBOR grid
// origin with the SES spacing — confirm the two grids share an origin.
float3 spacePos3DCellSES = gridToSpace(ijkOffset, originGridNeighbor, dxSES);
// id of the current cell in the neighbor grid
int3 gridPos3DCellNeighbor = spaceToGrid(spacePos3DCellSES, originGridNeighbor, dxNeighbor);
//Loop over neighbors to compute if the cell is:
// 1) - ouside => result = PROBERADIUS
// 2) - inside => result = -dx
// 3) - at the border => result = 0.0
float result = computeInOrOut(gridPos3DCellNeighbor, cellStartEnd, sorted_xyzr, spacePos3DCellSES, dxSES, gridDimNeighbor);
//If should process => record id, if not record a large number
int fill = EMPTYCELL;
int range = (int)ceil(PROBERADIUS / dxSES);
if (abs(result) < EPSILON){
fill = hash;
// if(i < range || j < range || k < range || i > smallsliceGridDimSES.x + range || j > smallsliceGridDimSES.y + range || k > smallsliceGridDimSES.z + range){
// fill = EMPTYCELL;
// }
}
checkFill[hash] = fill;
gridValues[hash] = result;
}
// Refinement pass over compacted border cells (checkFill now holds only the
// hashes of cells flagged by probeIntersection): for each, search the SES
// grid within +/- ceil(PROBERADIUS/dx) cells for the nearest cell currently
// classified "outside" and rewrite the field value as PROBERADIUS minus that
// distance (or -dx if none is in range), sharpening the signed distance
// field near the surface. `of` is the launch offset into the compacted list.
__global__ void distanceFieldRefine(int * checkFill, int2 * atomHashIndex, int3 gridDimNeighbor,
float4 originGridNeighborDDx, int3 gridDimSES, int3 sliceGridDimSES,
float4 originGridSESDx, int2 * cellStartEnd, float4 * sorted_xyzr,
float *gridValues, unsigned int totalCells, unsigned int notEmptyCells,
int3 offsetGrid, unsigned int of) {
int id = (threadIdx.x + blockIdx.x * blockDim.x) + of;
if (id >= notEmptyCells) {
return;
}
int hash = checkFill[id];
int3 ijk = unflatten1DTo3D(hash, sliceGridDimSES);
//From slice to real id in the full size grid
int3 ijkOffset = make_int3(ijk.x + offsetGrid.x, ijk.y + offsetGrid.y, ijk.z + offsetGrid.z);
int hashOffset = flatten3DTo1D(ijkOffset, gridDimSES);
float3 originGridSES = make_float3(originGridSESDx.x, originGridSESDx.y, originGridSESDx.z);
float dxSES = originGridSESDx.w;
float3 spacePos3DCellSES = gridToSpace(ijkOffset, originGridSES, dxSES);
float newresult = -dxSES;
const int idSESRangeToSearch = (int)ceil(PROBERADIUS / dxSES);
// Threshold for "outside": values within EPSILON of PROBERADIUS.
const float pme = PROBERADIUS - EPSILON;
float minDist = 100000.0f;
int3 curgridSESId;
//Find the closest outside SES cell in the range [-probeRadius, +probeRadius]
for (int x = -idSESRangeToSearch; x <= idSESRangeToSearch; x++) {
curgridSESId.x = clamp(ijk.x + x , 0, sliceGridDimSES.x - 1);
for (int y = -idSESRangeToSearch; y <= idSESRangeToSearch; y++) {
curgridSESId.y = clamp(ijk.y + y , 0, sliceGridDimSES.y - 1);
for (int z = -idSESRangeToSearch; z <= idSESRangeToSearch; z++) {
curgridSESId.z = clamp(ijk.z + z , 0, sliceGridDimSES.z - 1);
int curgrid1DSESId = flatten3DTo1D(curgridSESId, sliceGridDimSES);
if (gridValues[curgrid1DSESId] > pme) {//Outside
int3 curgrid3DSESIdOffset = make_int3(curgridSESId.x + offsetGrid.x,
curgridSESId.y + offsetGrid.y,
curgridSESId.z + offsetGrid.z);
float3 spacePosSES = gridToSpace(curgrid3DSESIdOffset, originGridSES, dxSES);
//Distance from our current grid cell to the outside grid cell
float d = fast_distance(spacePosSES, spacePos3DCellSES);
minDist = min(d, minDist);
}
}
}
}
// 999.0f acts as "found at least one outside cell" (minDist was 100000).
if (minDist < 999.0f)
newresult = PROBERADIUS - minDist;
gridValues[hash] = newresult;
}
// Return the index (into sorted_xyzr) of the atom closest to `vert`, searching
// the 3x3x3 block of cells around `id3DNeigh` in the neighbor grid.
// Cells are looked up via cellStartEnd (start/stop atom index per cell; start
// >= EMPTYCELL means the cell holds no atoms). Returns -1 when every visited
// cell is empty.
inline __host__ __device__ int computeClosestAtom(float3 vert, int3 id3DNeigh, int2 *cellStartEnd, float4 *sorted_xyzr, int3 gridDimNeighbor) {
    int closestId = -1;
    float minD = 999999.0f;
    int3 curgridId;
    for (int x = -1; x <= 1; x++) {
        curgridId.x = clamp(id3DNeigh.x + x, 0, gridDimNeighbor.x - 1);
        for (int y = -1; y <= 1; y++) {
            // BUGFIX: clamp against the y (resp. z) extent of the grid; the
            // original clamped all three axes with gridDimNeighbor.x - 1,
            // which yields wrong (possibly out-of-range) cell hashes whenever
            // the neighbor grid is not a cube.
            curgridId.y = clamp(id3DNeigh.y + y, 0, gridDimNeighbor.y - 1);
            for (int z = -1; z <= 1; z++) {
                curgridId.z = clamp(id3DNeigh.z + z, 0, gridDimNeighbor.z - 1);
                int neighcellhash = flatten3DTo1D(curgridId, gridDimNeighbor);
                int idStart = cellStartEnd[neighcellhash].x;
                int idStop = cellStartEnd[neighcellhash].y;
                if (idStart < EMPTYCELL) { // cell contains atoms
                    for (int id = idStart; id < idStop; id++) {
                        float4 xyzr = sorted_xyzr[id];
                        float3 pos = make_float3(xyzr.x, xyzr.y, xyzr.z);
                        float d = fast_distance(pos, vert);
                        if (d < minD) {
                            minD = d;
                            closestId = id;
                        }
                    }
                }
            }
        }
    }
    return closestId;
}
// For each surface vertex, find the id of its closest atom and store it in
// atomIdPerVert. One thread per vertex; threads past Nvert exit immediately.
__global__ void closestAtomPerVertex(int *atomIdPerVert, float3 *vertices, unsigned int Nvert, int3 gridDimNeighbor,
                                     float4 originGridNeighborDDx,
                                     float4 originGridSESDx, int2 * cellStartEnd, float4 * sorted_xyzr) {
    const int vid = threadIdx.x + blockIdx.x * blockDim.x;
    if (vid >= Nvert)
        return;
    // Unpack origin (xyz) and cell size (w) of the neighbor-search grid.
    const float3 neighborOrigin = make_float3(originGridNeighborDDx.x,
                                              originGridNeighborDDx.y,
                                              originGridNeighborDDx.z);
    const float neighborDx = originGridNeighborDDx.w;
    const float3 v = vertices[vid];
    // Cell of the neighbor grid containing this vertex.
    const int3 cell = spaceToGrid(v, neighborOrigin, neighborDx);
    atomIdPerVert[vid] = computeClosestAtom(v, cell, cellStartEnd, sorted_xyzr, gridDimNeighbor);
}
// Reset border bands of one slice of the SES distance grid to PROBERADIUS
// ("outside"), so the refine pass does not reuse stale values near the slice
// edges. A band of width rangeSearchRefine is reset near the high-x/y/z faces;
// near the low face of each axis the reset depends on offset: a thin band when
// the slice is offset along that axis, otherwise a doubled band at the high
// face. NOTE(review): the offset==0 branch widens the HIGH-face band rather
// than touching the low face — verify this asymmetry against the slicing
// driver.
__global__ void resetGridValuesSlice(const int3 offset, const int rangeSearchRefine, const int3 sliceGridDimSES, float *gridValues){
    // Signed indices on purpose: the guards below compare against expressions
    // such as sliceGridDimSES.x - rangeSearchRefine * 2, which can be
    // negative. With the original unsigned indices, usual arithmetic
    // conversions turned a negative right-hand side into a huge unsigned
    // value, silently disabling the guard.
    // Get global position in X direction
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Get global position in Y direction
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    // Get global position in Z direction
    int k = threadIdx.z + blockIdx.z * blockDim.z;
    int3 ijk = make_int3(i, j, k);
    if (i >= sliceGridDimSES.x - 1)
        return;
    if (j >= sliceGridDimSES.y - 1)
        return;
    if (k >= sliceGridDimSES.z - 1)
        return;
    int hash = flatten3DTo1D(ijk, sliceGridDimSES);
    // Band near the high-x/y/z faces of the slice.
    if (i >= sliceGridDimSES.x - rangeSearchRefine)
        gridValues[hash] = PROBERADIUS;
    if (j >= sliceGridDimSES.y - rangeSearchRefine)
        gridValues[hash] = PROBERADIUS;
    if (k >= sliceGridDimSES.z - rangeSearchRefine)
        gridValues[hash] = PROBERADIUS;
    // Per-axis low-face handling (see header comment).
    if (offset.x != 0) {
        if (i < rangeSearchRefine)
            gridValues[hash] = PROBERADIUS;
    }
    else {
        if (i >= sliceGridDimSES.x - rangeSearchRefine * 2)
            gridValues[hash] = PROBERADIUS;
    }
    if (offset.y != 0) {
        if (j < rangeSearchRefine)
            gridValues[hash] = PROBERADIUS;
    }
    else {
        if (j >= sliceGridDimSES.y - rangeSearchRefine * 2)
            gridValues[hash] = PROBERADIUS;
    }
    if (offset.z != 0) {
        if (k < rangeSearchRefine)
            gridValues[hash] = PROBERADIUS;
    }
    else {
        if (k >= sliceGridDimSES.z - rangeSearchRefine * 2)
            gridValues[hash] = PROBERADIUS;
    }
}
6,285 | ////#include<helper_cuda.h>
////#include<cuda_runtime.h>
////#include<device_launch_parameters.h>
////#include<iostream>
////#include<cmath>
////#include<ctime>
////
////
////__global__ void add_cuk(float* x, float* y, float* z, int Num)
////{
//// int index = blockIdx.x * blockDim.x + threadIdx.x;
////
//// if (index < Num)
//// {
//// z[index] = x[index] + y[index];
//// }
////}
////
////void add(float* x, float* y, float* z, int Num)
////{
//// for (int i = 0; i < Num; i++)
//// {
//// z[i] = x[i] + y[i];
//// }
////}
////
////int main()
////{
//// clock_t beg, end;
////
////
//// int Num = 500000000;
//// int bytes = Num * sizeof(float);
////
//// dim3 block(128, 1);
//// dim3 cuda_grid_size = dim3(static_cast<int>(ceil((Num + block.x - 1) / block.x)), 1);
////
//// float* dx = NULL;
//// float* dy = NULL;
//// float* dz = NULL;
//// float* hx = NULL;
//// float* hy = NULL;
//// float* hz = NULL;
////
//// hx = new float[Num];
//// hy = new float[Num];
//// hz = new float[Num];
////
//// for (int i = 0; i < Num; i++)
//// {
//// hx[i] = 1.01;
//// hy[i] = 1.02;
//// hz[i] = 1.03;
//// }
////
//// cudaMalloc((void**)&dx, bytes);
//// cudaMalloc((void**)&dy, bytes);
//// cudaMalloc((void**)&dz, bytes);
////
//// cudaMemcpy(dx, hx, bytes, cudaMemcpyHostToDevice);
//// cudaMemcpy(dy, hy, bytes, cudaMemcpyHostToDevice);
//// cudaMemcpy(dz, hz, bytes, cudaMemcpyHostToDevice);
////
//// beg = clock();
//// add_cuk << <cuda_grid_size, block >> > (dx, dy, dz, Num);
////
//// cudaThreadSynchronize();
//// end = clock();
////
//// cudaFree(dx);
//// cudaFree(dy);
//// cudaFree(dz);
////
//// delete[]hx;
//// delete[]hy;
//// delete[]hz;
////
//// double time = static_cast<double>((end - beg)) / CLOCKS_PER_SEC;
////
//// std::cout << "GPU cost : " << time << std::endl;
////
////
//// clock_t start, finish;
////
////
////
//// hx = new float[Num];
//// hy = new float[Num];
//// hz = new float[Num];
////
//// for (int i = 0; i < Num; i++)
//// {
//// hx[i] = 1.01;
//// hy[i] = 1.02;
//// hz[i] = 1.03;
//// }
////
//// start = clock();
//// add(hx, hy, hz, Num);
////
//// finish = clock();
//// delete[]hx;
//// delete[]hy;
//// delete[]hz;
////
////
//// double time_cpu = static_cast<double>((finish - start)) / CLOCKS_PER_SEC;
////
//// std::cout << "CPU cost : " << time_cpu << std::endl;
////
//// std::cout << "Accelaration rate : " << time_cpu / time << std::endl;
////
//// return 0;
////}
//
//
//
//
//#include<iostream>
//#include<helper_cuda.h>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//
//__global__ void add_cuk(float* a, int N)
//{
//
// int index = blockDim.x * blockIdx.x + threadIdx.x;
// if (index < N)
// {
// a[index] += 1;
// }
//}
//
//
//int main()
//{
// int n = 30;
// int bytes = n * sizeof(float);
//
// float* da = NULL;
// float* ha = NULL;
//
// //ha = (float*)malloc(bytes);
// ha = new float[n];
// cudaMalloc((void**)&da, bytes);
//
// for (int i = 0; i < n; i++)
// {
// ha[i] = i + 1;
// std::cout << "ha[ " << i << " ] = " << ha[i] << std::endl;
// }
//
// dim3 block(16, 1);
// dim3 cuda_grid_size((n + block.x - 1) / block.x, 1);
// cudaMemcpy(da, ha, bytes, cudaMemcpyHostToDevice);
//
// add_cuk << <cuda_grid_size, block >> > (da, n);
// cudaDeviceSynchronize();
//
// cudaMemcpy(ha, da, bytes, cudaMemcpyDeviceToHost);
//
// std::cout << "*****************The data from GPUs*****************" << std::endl;
// for (int i = 0; i < n; i++)
// {
// ha[i] = i + 1;
// std::cout << "ha[ " << i << " ] = " << ha[i] << std::endl;
// }
//
// //free(ha);
// delete[]ha;
// cudaFree(da);
// return 0;
//}
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/generate.h>
//#include <thrust/sort.h>
//#include <thrust/copy.h>
//#include <algorithm>
//#include <vector>
//#include <time.h>
//
//int main(void)
//{
// thrust::host_vector<int> h_vec(1024 * 1024);
// std::generate(h_vec.begin(), h_vec.end(), rand);
//
// std::vector<int> vec(h_vec.size());
// thrust::copy(h_vec.begin(), h_vec.end(), vec.begin());
//
// thrust::device_vector<int> d_vec = h_vec;
//
// clock_t time1, time2;
//
// time1 = clock();
// thrust::sort(d_vec.begin(), d_vec.end());
// time2 = clock();
// std::cout << (double)(time2 - time1) / CLOCKS_PER_SEC << std::endl;
//
// time1 = clock();
// std::sort(vec.begin(), vec.end());
// time2 = clock();
// std::cout << (double)(time2 - time1) / CLOCKS_PER_SEC << std::endl;
//
// time1 = clock();
// thrust::sort(h_vec.begin(), h_vec.end());
// time2 = clock();
// std::cout << (double)(time2 - time1) / CLOCKS_PER_SEC << std::endl;
//
// return 0;
//}
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//
//#include <iostream>
//
//int main(void)
//{
// // H has storage for 4 integers
// thrust::host_vector<int> H(4);
//
// // initialize individual elements
// H[0] = 14;
// H[1] = 20;
// H[2] = 38;
// H[3] = 46;
//
// // H.size() returns the size of vector H
// std::cout << "H has size " << H.size() << std::endl;
//
// // print contents of H
// for (int i = 0; i < H.size(); i++)
// std::cout << "H[" << i << "] = " << H[i] << std::endl;
//
// // resize H
// H.resize(2);
//
// std::cout << "H now has size " << H.size() << std::endl;
//
// // Copy host_vector H to device_vector D
// thrust::device_vector<int> D = H;
//
// // elements of D can be modified
// D[0] = 99;
// D[1] = 88;
//
// // print contents of D
// for (int i = 0; i < D.size(); i++)
// std::cout << "D[" << i << "] = " << D[i] << std::endl;
//
// // H and D are automatically deleted when the function returns
// return 0;
//}
//#include <iostream>
//#include <cstdlib>
//#include <time.h>
//#include <algorithm>
//#include<cuda_runtime.h>
//#include<helper_cuda.h>
//#include<device_launch_parameters.h>
//#include<device_functions.h>
//
//
//using namespace std;
//
//#define NUM_ELEMENT 4096
//#define NUM_LISTS 128
//
//typedef enum {
// cpu_sort = 0,
//	gpu_merge_1,                  // radix sort + single-thread merge (mojibake comment replaced; meaning inferred from merge_1)
//	gpu_merge_all,                // radix sort + atomicMin merge (inferred from merge_atomicMin)
//	gpu_merge_reduction,          // radix sort + reduction merge (inferred from merge_two)
//	gpu_merge_reduction_modified, // radix sort + partitioned reduction merge (inferred from merge_final)
//}gpu_calc_type;
//
//template<class T> void c_swap(T& x, T& y) { T tmp = x; x = y; y = tmp; }
//
//unsigned int srcData[NUM_ELEMENT];
//
//void gen_and_shuffle(unsigned int* const srcData)
//{
// for (int i = 0; i < NUM_ELEMENT; i++) //ɲظ
// srcData[i] = i;
// for (int i = 0; i < NUM_ELEMENT; i++)
// c_swap(srcData[rand() % NUM_ELEMENT], srcData[i]);
// return;
//}
//
//void print_data(unsigned int* const srcData)
//{
// for (int i = 0; i < NUM_ELEMENT; i++)
// {
// printf("%4u", srcData[i]);
// if ((i + 1) % 32 == 0)
// printf("\n");
// }
//}
//
//__device__ void copy_data_in_gpu(const unsigned int* const srcData,
// unsigned int* const dstData,
// const unsigned int tid)
//{
// for (int i = 0; i < NUM_ELEMENT; i += NUM_LISTS)
// dstData[i + tid] = srcData[i + tid]; //п
// __syncthreads();
//}
//
//__device__ void radix_sort2(unsigned int* const sort_tmp,
// unsigned int* const sort_tmp_1,
// const unsigned int tid) //Ͱ
//{
// for (unsigned int bit_mask = 1; bit_mask > 0; bit_mask <<= 1) //32λ
// {
// unsigned int base_cnt_0 = 0;
// unsigned int base_cnt_1 = 0;
//
// for (unsigned int i = 0; i < NUM_ELEMENT; i += NUM_LISTS)
// {
// if (sort_tmp[i + tid] & bit_mask) //λ1ŵsort_tmp_1
// {
// sort_tmp_1[base_cnt_1 + tid] = sort_tmp[i + tid];
// base_cnt_1 += NUM_LISTS;
// }
// else //λ0ŵsort_tmpǰ
// {
// sort_tmp[base_cnt_0 + tid] = sort_tmp[i + tid];
// base_cnt_0 += NUM_LISTS;
// }
// }
//
// for (unsigned int i = 0; i < base_cnt_1; i += NUM_LISTS) //sort_tmp_1ݷŵsort_tmp
// {
// sort_tmp[base_cnt_0 + i + tid] = sort_tmp_1[i + tid];
// }
// __syncthreads();
// }
//}
//
//__device__ void merge_1(unsigned int* const srcData,
// unsigned int* const dstData,
// const unsigned int tid) //̺߳ϲ
//{
// __shared__ unsigned int list_index[NUM_LISTS]; //ʹ__shared__ĻͻᴴڼĴУĴռ䲻ᴴȫڴУ
// list_index[tid] = tid; //ʹö̳߳ʼ
// __syncthreads();
//
// if (tid == 0) //ʹõ߳merge
// {
// for (int i = 0; i < NUM_ELEMENT; i++) //ִNUM_ELEMENT
// {
// unsigned int min_val = 0xFFFFFFFF;
// unsigned int min_idx = 0;
// for (int j = 0; j < NUM_LISTS; j++) //ÿlistͷָ
// {
// if (list_index[j] >= NUM_ELEMENT) //бѾ
// continue;
// if (srcData[list_index[j]] < min_val)
// {
// min_val = srcData[list_index[j]];
// min_idx = j;
// }
// }
// list_index[min_idx] += NUM_LISTS; //СǸָһλ
// dstData[i] = min_val;
// }
// }
//}
//
//__device__ void merge_atomicMin(unsigned int* const srcData,
// unsigned int* const dstData,
// const unsigned int tid) //̺߳ϲ
//{
// unsigned int self_index = tid;
//
// for (int i = 0; i < NUM_ELEMENT; i++)
// {
// __shared__ unsigned int min_val;
// unsigned int self_data = 0xFFFFFFFF;
//
// if (self_index < NUM_ELEMENT)
// {
// self_data = srcData[self_index];
// }
//
// __syncthreads();
//
// atomicMin(&min_val, self_data);
//
// if (min_val == self_data)
// {
// self_index += NUM_LISTS;
// dstData[i] = min_val;
// min_val = 0xFFFFFFFF;
// }
//
// }
//}
//
//__device__ void merge_two(unsigned int* const srcData,
// unsigned int* dstData,
// const unsigned int tid) //Լϲ
//{
// unsigned int self_index = tid;
// __shared__ unsigned int data[NUM_LISTS];
// __shared__ unsigned int tid_max;
//
// for (int i = 0; i < NUM_ELEMENT; i++)
// {
// data[tid] = 0xFFFFFFFF;
//
// if (self_index < NUM_ELEMENT)
// {
// data[tid] = srcData[self_index];
// }
//
// if (tid == 0)
// {
// tid_max = NUM_LISTS >> 1;
// }
//
// __syncthreads();
//
// while (tid_max > 0)
// {
// if (tid < tid_max)
// {
// if (data[tid] > data[tid + tid_max]) //СĻǰ
// {
// data[tid] = data[tid + tid_max];
// }
// }
// __syncthreads();
// if (tid == 0) //Ϊʲôõһ̴߳
// {
// tid_max >>= 1;
// }
// __syncthreads();
// }
//
// if (srcData[self_index] == data[0])
// {
// dstData[i] = data[0];
// self_index += NUM_LISTS;
// }
// }
//}
//
//#define REDUCTION_SIZE 8
//#define REDUCTION_SHIFT 3
//
//__device__ void merge_final(unsigned int* const srcData,
// unsigned int* const dstData,
// const unsigned int tid) //ֿĹԼϲ
//{
// __shared__ unsigned int min_val_reduction[NUM_LISTS / REDUCTION_SIZE];
// unsigned int s_tid = tid >> REDUCTION_SHIFT;
// unsigned int self_index = tid;
// __shared__ unsigned int min_val;
//
// for (int i = 0; i < NUM_ELEMENT; i++)
// {
// unsigned int self_data = 0xFFFFFFFF;
//
// if (self_index < NUM_ELEMENT)
// {
// self_data = srcData[self_index];
// }
//
// if (tid < NUM_LISTS / REDUCTION_SIZE)
// {
// min_val_reduction[tid] = 0xFFFFFFFF;
// }
//
// __syncthreads();
//
// atomicMin(&(min_val_reduction[s_tid]), self_data); //ֿԼ
//
// __syncthreads();
//
// if (tid == 0)
// {
// min_val = 0xFFFFFFFF;
// }
//
// __syncthreads();
//
// if (tid < NUM_LISTS / REDUCTION_SIZE)
// {
// atomicMin(&min_val, min_val_reduction[tid]); //ԼֵٹԼ
// }
//
// __syncthreads();
//
// if (min_val == self_data)
// {
// dstData[i] = min_val;
// self_index += NUM_LISTS;
// min_val = 0xFFFFFFFF;
// }
//
// }
//}
//
//__global__ void sortincuda(unsigned int* const data, gpu_calc_type type)
//{
// const unsigned int tid = threadIdx.x;
// __shared__ unsigned int sort_tmp[NUM_ELEMENT], sort_tmp_1[NUM_ELEMENT];
//
// copy_data_in_gpu(data, sort_tmp, tid); //ΪҪȡд룬˿ݵڴԼ
//
// radix_sort2(sort_tmp, sort_tmp_1, tid); //Ͱ
//
// switch (type)
// {
// case cpu_sort: break;
// case gpu_merge_1: merge_1(sort_tmp, data, tid); break; //̺߳ϲ
// case gpu_merge_all: merge_atomicMin(sort_tmp, data, tid); break; //̺߳ϲ
// case gpu_merge_reduction: merge_two(sort_tmp, data, tid); break; //Լϲ
// case gpu_merge_reduction_modified: merge_final(sort_tmp, data, tid); break; //ֿԼϲ
// default: break;
// }
//}
//
//int main(void)
//{
// gen_and_shuffle(srcData);
// //print_data(srcData);
//
// //printf("\n\n");
//
// unsigned int* gpu_srcData;
//
// cudaMalloc((void**)&gpu_srcData, sizeof(unsigned int) * NUM_ELEMENT);
// cudaMemcpy(gpu_srcData, srcData, sizeof(unsigned int) * NUM_ELEMENT, cudaMemcpyHostToDevice);
//
// clock_t start, end;
//
// for (gpu_calc_type type = cpu_sort; type <= gpu_merge_reduction_modified; type = (gpu_calc_type)(type + 1))
// {
// if (type != cpu_sort) //gpu
// {
// start = clock();
// sortincuda << <1, NUM_LISTS >> > (gpu_srcData, type);
// cudaDeviceSynchronize();
// end = clock();
// printf("type %d use time %.8lf\n", type, (double)(end - start) / CLOCKS_PER_SEC);
// }
// else //cpu
// {
// start = clock();
// sort(srcData, srcData + NUM_ELEMENT);
// end = clock();
// printf("type %d use time %.8lf\n", type, (double)(end - start) / CLOCKS_PER_SEC);
// }
// }
//
// cudaMemcpy(srcData, gpu_srcData, sizeof(unsigned int) * NUM_ELEMENT, cudaMemcpyDeviceToHost);
// //print_data(srcData);
//
// cudaFree(gpu_srcData);
//
//
// return 0;
//
//}
//#include<cuda.h>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//#include<helper_cuda.h>
//#include<thrust/device_vector.h>
//#include<thrust/host_vector.h>
//#include<thrust/sort.h>
//#include<iostream>
//
//
//__global__ void copy_cuk(float* d_a, int Num)
//{
// int index = blockDim.x * blockIdx.x + threadIdx.x;
//
// if (index < Num)
// {
// d_a[index] = index;
// }
//}
//
//int main()
//{
// int Num = 10;
//
// float* d_a = NULL;
// float* h_a = NULL;
//
// h_a = new float[Num];
// cudaMalloc((void**)&d_a, Num * sizeof(float));
//
// //thrust::host_vector<int> Host_array(Num);
//
// //for (int i = 0; i < Num; i++)
// //{
// // Host_array[i] = Num - i;
// //}
//
// //thrust::device_vector<int> Device_array = Host_array;
// thrust::device_vector<int> Device_array(Num);
//
//
//
// dim3 block(128, 1);
// dim3 grid_size((Num + block.x - 1) / block.x, 1);
//
// copy_cuk << <grid_size, block >> > ( d_a, Num);
//
// cudaDeviceSynchronize();
// cudaMemcpy(h_a, d_a, Num * sizeof(float), cudaMemcpyDeviceToHost);
//
// for (int i = 0; i < Num; i++)
// {
// //d_a[i] = Num - i;
// Device_array[i] = h_a[i];
// }
//
// thrust::sort(Device_array.begin(), Device_array.end(), thrust::greater<int>());
//
// for (int i = 0; i < Device_array.size(); i++)
// {
// std::cout << "D[" << i << "] = " << Device_array[i] << std::endl;
// }
//
// delete[]h_a;
// cudaFree(d_a);
// Device_array.clear();
//
// return 0;
//}
//#include<iostream>
//#include<cuda_runtime.h>
//#include<device_launch_parameters.h>
//#include<helper_cuda.h>
//#include<thrust/device_vector.h>
//#include<thrust/host_vector.h>
//#include<thrust/sort.h>
//
//int main()
//{
// float* h_a = NULL;
// float* d_a = NULL;
//
// int n = 10;
//
// h_a = (float*)malloc(sizeof(float) * n);
// cudaMalloc((void**)&d_a, sizeof(float) * n);
//
// for (int i = 0; i < n; i++)
// {
// h_a[i] = i + 1;
// }
//
// cudaMemcpy(d_a, h_a, sizeof(float) * n, cudaMemcpyHostToDevice);
//
// float* hh_a = new float[n];
// cudaMemcpy(hh_a, d_a, sizeof(float) * n, cudaMemcpyDeviceToHost);
//
//
// for (int i = 0; i < n; i++)
// {
// std::cout << "hh_a[" << i << "] = " << hh_a[i] << std::endl;
// }
//
// delete[]hh_a;
// cudaFree(d_a);
// free(h_a);
//
// return 0;
//}
//// memcpy vs. element-by-element for-loop copy: speed comparison (mojibake comment reconstructed from the code below)
//#include<iostream>
//#include<ctime>
//
//int main()
//{
// int num = 100000000;
//
// float* a = new float[num];
// float* b = new float[num];
//
// for (int i = 0; i < num; i++)
// {
// a[i] = 100.0f;
// }
//
// std::clock_t start, finish;
//
// start = std::clock();
// for (int i = 0; i < num; i++)
// {
// b[i] = a[i];
// }
// finish = std::clock();
//
// std::cout << "For loop cost : " << double(finish - start) / (CLOCKS_PER_SEC) << std::endl;
//
// std::clock_t beg, end;
//
// beg = std::clock();
// memcpy(b, a, num * sizeof(int));
// end = std::clock();
//
// std::cout << "Copy cost : " << double(end - beg) / (CLOCKS_PER_SEC) << std::endl;
//
// delete[]a;
// delete[]b;
//
// return 0;
//}
//
//#include<iostream>
//#include<string>
//#include<vector>
//#include<thrust/sort.h>
//#include<thrust/device_ptr.h>
//#include<cuda_runtime.h>
//#include<helper_cuda.h>
//#include<cooperative_groups.h>
//
//
//int main()
//{
// int* h_a = NULL;
// int* d_a = NULL;
// int* h_idx = NULL;
// int* d_idx = NULL;
//
// h_a = new int[10];
// h_idx = new int[10];
// cudaMalloc((void**)&d_a, 10 * sizeof(int));
// cudaMalloc((void**)&d_idx, 10 * sizeof(int));
//
// //Initialize h_a[]
// for (int i = 0; i < 10; i++)
// {
// h_a[i] = rand();
// h_idx[i] = i + 1;
//
// std::cout << "h_idx[" << i << "] = " << h_idx[i] << "\t";
// std::cout << "h_a[" << i << "] = " << h_a[i] << std::endl;
// }
//
// std::cout << std::endl;
// std::cout << "----------------------------------------------" << std::endl;
// std::cout << "------------------Sort Array------------------" << std::endl;
// std::cout << "----------------------------------------------" << std::endl;
// std::cout << std::endl;
//
// cudaMemcpy(d_a, h_a, 10 * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(d_idx, h_idx, 10 * sizeof(int), cudaMemcpyHostToDevice);
//
// thrust::sort_by_key(
// thrust::device_ptr<int>(d_a),
// thrust::device_ptr<int>(d_a + 10),
// thrust::device_ptr<int>(d_idx));
//
// cudaMemcpy(h_a, d_a, 10 * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(h_idx, d_idx, 10 * sizeof(int), cudaMemcpyDeviceToHost);
//
//
// for (int i = 0; i < 10; i++)
// {
// std::cout << "h_idx[" << i << "] = " << h_idx[i] << "\t";
// std::cout << "h_a[" << i << "] = " << h_a[i] << std::endl;
// }
//
// //Free memory
// delete[]h_a;
// delete[]h_idx;
//
// cudaFree(d_a);
// cudaFree(d_idx);
//
// return 0;
//}
//#include<iostream>
//#include<cuda_runtime.h>
//#include<string>
//
//
//int main() {
//
// int* h_a = NULL;
// int* h_b = NULL;
// int* d_a = NULL;
// int* d_b = NULL;
//
// int num = 10;
// size_t size = num * sizeof(int);
//
// h_a = new int[10];
// h_b = new int[10];
// cudaMalloc((void**)&d_a, size);
// cudaMalloc((void**)&d_b, size);
//
// for (int i = 0; i < 10; i++)
// {
// h_a[i] = i;
// }
//
// cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
// d_b = d_a;
// cudaMemcpy(h_b, d_b, size, cudaMemcpyDeviceToHost);
//
// for (int i = 0; i < 10; i++)
// {
// std::cout << h_b[i] << std::endl;
// }
//
// std::cout << "Hello World!" << std::endl;
//
// delete[] h_a;
// delete[] h_b;
// cudaFree(d_a);
// cudaFree(d_b);
//
// return 0;
//}
//#include<iostream>
//#include<thrust/sort.h>
//#include<vector>
//
//int main()
//{
//
// std::vector<int> a;
// std::vector<int> index;
//
// a.resize(8);
// index.resize(a.size());
//
// for (int i = 0; i < a.size(); i++)
// {
// a[i] = i + 1;
// index[i] = i + 1;
//
// std::cout << a[i] << std::endl;
// }
//
// thrust::sort_by_key(&a[0], &a[0] + 4, &index[0], thrust::greater<int>());
//
// std::cout << std::endl;
// for (int i = 0; i < a.size(); i++)
// {
//
// std::cout << index[i] << std::endl;
// }
//}
///******************************************************************************
// * Copyright (c) 2011, Duane Merrill. All rights reserved.
// * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
// *
// * Redistribution and use in source and binary forms, with or without
// * modification, are permitted provided that the following conditions are met:
// * * Redistributions of source code must retain the above copyright
// * notice, this list of conditions and the following disclaimer.
// * * Redistributions in binary form must reproduce the above copyright
// * notice, this list of conditions and the following disclaimer in the
// * documentation and/or other materials provided with the distribution.
// * * Neither the name of the NVIDIA CORPORATION nor the
// * names of its contributors may be used to endorse or promote products
// * derived from this software without specific prior written permission.
// *
// * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
// * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// *
// ******************************************************************************/
//
// /******************************************************************************
// * Simple example of DeviceRadixSort::SortPairs().
// *
// * Sorts an array of float keys paired with a corresponding array of int values.
// *
// * To compile using the command line:
// * nvcc -arch=sm_XX example_device_radix_sort.cu -I../.. -lcudart -O3
// *
// ******************************************************************************/
//
// // Ensure printing of CUDA runtime errors to console
//#define CUB_STDERR
//
//#include <stdio.h>
//#include <algorithm>
//
//#include <cub/util_allocator.cuh>
//#include <cub/device/device_radix_sort.cuh>
//
//#include "E:/Program Files/cub-1.8.0/test/test_util.h"
//
//using namespace cub;
//
//
////---------------------------------------------------------------------
//// Globals, constants and typedefs
////---------------------------------------------------------------------
//
//bool g_verbose = false; // Whether to display input/output to console
//CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
//
//
////---------------------------------------------------------------------
//// Test generation
////---------------------------------------------------------------------
//
///**
// * Simple key-value pairing for floating point types. Distinguishes
// * between positive and negative zero.
// */
//struct Pair
//{
// float key;
// int value;
//
// bool operator<(const Pair& b) const
// {
// if (key < b.key)
// return true;
//
// if (key > b.key)
// return false;
//
// // Return true if key is negative zero and b.key is positive zero
// unsigned int key_bits = *reinterpret_cast<unsigned*>(const_cast<float*>(&key));
// unsigned int b_key_bits = *reinterpret_cast<unsigned*>(const_cast<float*>(&b.key));
// unsigned int HIGH_BIT = 1u << 31;
//
// return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0);
// }
//};
//
//
///**
// * Initialize key-value sorting problem.
// */
//void Initialize(
// float* h_keys,
// int* h_values,
// float* h_reference_keys,
// int* h_reference_values,
// int num_items)
//{
// Pair* h_pairs = new Pair[num_items];
//
// for (int i = 0; i < num_items; ++i)
// {
// RandomBits(h_keys[i]);
// RandomBits(h_values[i]);
// h_pairs[i].key = h_keys[i];
// h_pairs[i].value = h_values[i];
// }
//
// if (g_verbose)
// {
// printf("Input keys:\n");
// DisplayResults(h_keys, num_items);
// printf("\n\n");
//
// printf("Input values:\n");
// DisplayResults(h_values, num_items);
// printf("\n\n");
// }
//
// std::stable_sort(h_pairs, h_pairs + num_items);
//
// for (int i = 0; i < num_items; ++i)
// {
// h_reference_keys[i] = h_pairs[i].key;
// h_reference_values[i] = h_pairs[i].value;
// }
//
// delete[] h_pairs;
//}
//
//
////---------------------------------------------------------------------
//// Main
////---------------------------------------------------------------------
//
///**
// * Main
// */
//int main(int argc, char** argv)
//{
// int num_items = 150;
//
// // Initialize command line
// CommandLineArgs args(argc, argv);
// g_verbose = args.CheckCmdLineFlag("v");
// args.GetCmdLineArgument("n", num_items);
//
// // Print usage
// if (args.CheckCmdLineFlag("help"))
// {
// printf("%s "
// "[--n=<input items> "
// "[--device=<device-id>] "
// "[--v] "
// "\n", argv[0]);
// exit(0);
// }
//
// // Initialize device
// CubDebugExit(args.DeviceInit());
//
// printf("cub::DeviceRadixSort::SortPairs() %d items (%d-byte keys %d-byte values)\n",
// num_items, int(sizeof(float)), int(sizeof(int)));
// fflush(stdout);
//
// // Allocate host arrays
// float* h_keys = new float[num_items];
// float* h_reference_keys = new float[num_items];
// int* h_values = new int[num_items];
// int* h_reference_values = new int[num_items];
//
// // Initialize problem and solution on host
// Initialize(h_keys, h_values, h_reference_keys, h_reference_values, num_items);
//
// // Allocate device arrays
// DoubleBuffer<float> d_keys;
// DoubleBuffer<int> d_values;
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(float) * num_items));
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(float) * num_items));
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(int) * num_items));
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(int) * num_items));
//
// // Allocate temporary storage
// size_t temp_storage_bytes = 0;
// void* d_temp_storage = NULL;
//
// CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items));
// CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
//
// // Initialize device arrays
// CubDebugExit(cudaMemcpy(d_keys.d_buffers[d_keys.selector], h_keys, sizeof(float) * num_items, cudaMemcpyHostToDevice));
// CubDebugExit(cudaMemcpy(d_values.d_buffers[d_values.selector], h_values, sizeof(int) * num_items, cudaMemcpyHostToDevice));
//
// // Run
// CubDebugExit(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items));
//
// // Check for correctness (and display results, if specified)
// int compare = CompareDeviceResults(h_reference_keys, d_keys.Current(), num_items, true, g_verbose);
// printf("\t Compare keys (selector %d): %s\n", d_keys.selector, compare ? "FAIL" : "PASS");
// AssertEquals(0, compare);
// compare = CompareDeviceResults(h_reference_values, d_values.Current(), num_items, true, g_verbose);
// printf("\t Compare values (selector %d): %s\n", d_values.selector, compare ? "FAIL" : "PASS");
// AssertEquals(0, compare);
//
// // Cleanup
// if (h_keys) delete[] h_keys;
// if (h_reference_keys) delete[] h_reference_keys;
// if (h_values) delete[] h_values;
// if (h_reference_values) delete[] h_reference_values;
//
// if (d_keys.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[0]));
// if (d_keys.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_keys.d_buffers[1]));
// if (d_values.d_buffers[0]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[0]));
// if (d_values.d_buffers[1]) CubDebugExit(g_allocator.DeviceFree(d_values.d_buffers[1]));
// if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
//
// printf("\n\n");
//
// return 0;
//}
//
//
//// Ensure printing of CUDA runtime errors to console
//#define CUB_STDERR
//#include <stdio.h>
//#include <cub/util_allocator.cuh>
//#include <cub/device/device_scan.cuh>
//#include "E:\Program Files\cub-1.8.0\test/test_util.h"
//using namespace cub;
////---------------------------------------------------------------------
//// Globals, constants and typedefs
////---------------------------------------------------------------------
//bool g_verbose = false; // Whether to display input/output to console
//CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
////---------------------------------------------------------------------
//// Test generation
////---------------------------------------------------------------------
//void Initialize(
// int* h_in,
// int num_items)
//{
// for (int i = 0; i < num_items; ++i)
// h_in[i] = i;
// if (g_verbose)
// {
// printf("Input:\n");
// DisplayResults(h_in, num_items);
// printf("\n\n");
// }
//}
//int Solve(
// int* h_in,
// int* h_reference,
// int num_items)
//{
// int inclusive = 0;
// int aggregate = 0;
// for (int i = 0; i < num_items; ++i)
// {
// h_reference[i] = inclusive;
// inclusive += h_in[i];
// aggregate += h_in[i];
// }
// return aggregate;
//}
////---------------------------------------------------------------------
//// Main
////---------------------------------------------------------------------
//int main(int argc, char** argv)
//{
// int num_items = 150;
// // Initialize command line
// CommandLineArgs args(argc, argv);
// g_verbose = args.CheckCmdLineFlag("v");
// args.GetCmdLineArgument("n", num_items);
// // Print usage
// if (args.CheckCmdLineFlag("help"))
// {
// printf("%s "
// "[--n=<input items> "
// "[--device=<device-id>] "
// "[--v] "
// "\n", argv[0]);
// exit(0);
// }
// // Initialize device
// CubDebugExit(args.DeviceInit());
// printf("cub::DeviceScan::ExclusiveSum %d items (%d-byte elements)\n",
// num_items, (int)sizeof(int));
// fflush(stdout);
// // Allocate host arrays
// int* h_in = new int[num_items];
// int* h_reference = new int[num_items];
// // Initialize problem and solution
// Initialize(h_in, num_items);
// Solve(h_in, h_reference, num_items);
// // Allocate problem device arrays
// int* d_in = NULL;
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// // Initialize device input
// CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// // Allocate device output array
// int* d_out = NULL;
// CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
// // Allocate temporary storage
// void* d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
// CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// // Run
// CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
// // Check for correctness (and display results, if specified)
// int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
// printf("\t%s", compare ? "FAIL" : "PASS");
// AssertEquals(0, compare);
// // Cleanup
// if (h_in) delete[] h_in;
// if (h_reference) delete[] h_reference;
// if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
// if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
// if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// printf("\n\n");
// return 0;
//}
//#include<iostream>
//#include<cuda_runtime.h>
//
//struct Point
//{
// float x;
// float y;
// float z;
//
// Point& operator=(const float& v1)
// {
// x = v1;
// y = v1;
// z = v1;
//
// return *this;
// }
//};
//
//
//int main()
//{
// int x(1), y(2), z(3);
//
// Point a = 0.0;
//
// return 0;
//}
//#include <stdio.h>
//#include <stdlib.h>
//#include <cublas_v2.h>
//#include<cublas.h>
//#include<math.h>
//
//
//#define cudacall(call) \
// do \
// { \
// cudaError_t err = (call); \
// if(cudaSuccess != err) \
// { \
// fprintf(stderr,"CUDA Error:\nFile = %s\nLine = %d\nReason = %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
// cudaDeviceReset(); \
// exit(EXIT_FAILURE); \
// } \
// } \
// while (0)
//
//#define cublascall(call) \
// do \
// { \
// cublasStatus_t status = (call); \
// if(CUBLAS_STATUS_SUCCESS != status) \
// { \
// fprintf(stderr,"CUBLAS Error:\nFile = %s\nLine = %d\nCode = %d\n", __FILE__, __LINE__, status); \
// cudaDeviceReset(); \
// exit(EXIT_FAILURE); \
// } \
// \
// } \
// while(0)
//
//////////////////////////////////////////////////////////////////////////
////
////
//////////////////////////////////////////////////////////////////////////
//void invert(float** src, float** dst, int n, int batchSize)
//{
// cublasHandle_t handle;
// cublascall(cublasCreate_v2(&handle));
//
// int* P, * INFO;
//
// cudacall(cudaMalloc(&P, n * batchSize * sizeof(int)));
// cudacall(cudaMalloc(&INFO, batchSize * sizeof(int)));
//
// int lda = n;
//
// float** A = (float**)malloc(batchSize * sizeof(float*));
// float** A_d, * A_dflat;
//
// cudacall(cudaMalloc(&A_d, batchSize * sizeof(float*)));
// cudacall(cudaMalloc(&A_dflat, n * n * batchSize * sizeof(float)));
//
// A[0] = A_dflat;
// for (int i = 1; i < batchSize; i++)
// A[i] = A[i - 1] + (n * n);
//
// cudacall(cudaMemcpy(A_d, A, batchSize * sizeof(float*), cudaMemcpyHostToDevice));
//
// for (int i = 0; i < batchSize; i++)
// cudacall(cudaMemcpy(A_dflat + (i * n * n), src[i], n * n * sizeof(float), cudaMemcpyHostToDevice));
//
//
// cublascall(cublasSgetrfBatched(handle, n, A_d, lda, P, INFO, batchSize));
//
//
// //int INFOh[batchSize];
// int INFOh[batchSize];
// cudacall(cudaMemcpy(INFOh, INFO, batchSize * sizeof(int), cudaMemcpyDeviceToHost));
//
// for (int i = 0; i < batchSize; i++)
// if (INFOh[i] != 0)
// {
// fprintf(stderr, "Factorization of matrix %d Failed: Matrix may be singular\n", i);
// cudaDeviceReset();
// exit(EXIT_FAILURE);
// }
//
// float** C = (float**)malloc(batchSize * sizeof(float*));
// float** C_d, * C_dflat;
//
// cudacall(cudaMalloc(&C_d, batchSize * sizeof(float*)));
// cudacall(cudaMalloc(&C_dflat, n * n * batchSize * sizeof(float)));
// C[0] = C_dflat;
// for (int i = 1; i < batchSize; i++)
// C[i] = C[i - 1] + (n * n);
// cudacall(cudaMemcpy(C_d, C, batchSize * sizeof(float*), cudaMemcpyHostToDevice));
// cublascall(cublasSgetriBatched(handle, n, (const float**)A_d, lda, P, C_d, lda, INFO, batchSize));
//
// cudacall(cudaMemcpy(INFOh, INFO, batchSize * sizeof(int), cudaMemcpyDeviceToHost));
//
// for (int i = 0; i < batchSize; i++)
// if (INFOh[i] != 0)
// {
// fprintf(stderr, "Inversion of matrix %d Failed: Matrix may be singular\n", i);
// cudaDeviceReset();
// exit(EXIT_FAILURE);
// }
// for (int i = 0; i < batchSize; i++)
// cudacall(cudaMemcpy(dst[i], C_dflat + (i * n * n), n * n * sizeof(float), cudaMemcpyDeviceToHost));
//
// cudaFree(A_d); cudaFree(A_dflat); free(A);
// cudaFree(C_d); cudaFree(C_dflat); free(C);
// cudaFree(P); cudaFree(INFO); cublasDestroy_v2(handle);
//}
//
//
//////////////////////////////////////////////////////////////////////////
////
////
//////////////////////////////////////////////////////////////////////////
//
//
//__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width)
//{
// //2D Thread ID
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int row = blockIdx.y * blockDim.y + threadIdx.y;
//
// //Pvalue stores the Pd element that is computed by the thread
// float Pvalue = 0;
// if (col < Width && row < Width)
// {
// for (int k = 0; k < Width; ++k)
// {
// float Mdelement = Md[row * Width + k];
// float Ndelement = Nd[k * Width + col];
// Pvalue += (Mdelement * Ndelement);
//
// }
// Pd[row * Width + col] = Pvalue;
// }
//}
//
//
//
//void mul(float* M, float* N, int Width)
//{
//
// float* P = (float*)malloc(Width * Width * sizeof(float));
// float* Md, * Nd, * Pd;
//
//
//
// unsigned long int size = Width * Width * sizeof(float);
//
//
// //Transfer M and N to device memory
// cudaMalloc((void**)&Md, size);
// cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&Nd, size);
// cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
//
// //Allocate P on the device
// cudaMalloc((void**)&Pd, size);
//
// //Setup the execution configuration
// dim3 dimBlock(Width, Width);
// dim3 dimGrid(1, 1);
//
//
// if (Width * Width > 1024)
// {
// //printf("\n\n enter inside if condi\n\n");
//
// dimGrid.x = (Width - 1) / 32 + 1;
// dimGrid.y = (Width - 1) / 32 + 1;
//
// dimBlock.x = 32;
// dimBlock.y = 32;
//
//
//
// }
//
//
// //Launch the device computation threads!
// MatrixMulKernel << <dimGrid, dimBlock >> > (Md, Nd, Pd, Width);
//
// //Transfer P from device to host
// cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
//
// //Free device matrices
// cudaFree(Md);
// cudaFree(Nd);
// cudaFree(Pd);
//
// int i;
//
// fprintf(stdout, "\n\n");
//
// if (Width < 11)
// {
//
//
// fprintf(stdout, "\n\nMatrix Multiplication, M x Inv(M) :\n\n");
// for (i = 0; i < Width * Width; i++)
// {
// if (P[i])
// fprintf(stdout, "%10f ", P[i]);
// else
// fprintf(stdout, "%9f ", P[i]);
//
//
//
//
// if ((i + 1) % Width == 0)
// fprintf(stdout, "\n");
// }
//
//
// }
// else
// {
// FILE* fp;
//
// fp = fopen("Mat_Inv_out", "a");
//
// if (!fp)
// {
// fprintf(stderr, "Failed to open matAdata.\n");
// exit(1);
// }
// fprintf(fp, "\n\nMatrix Multiplication, M x Inv(M) :\n\n");
// for (i = 0; i < Width * Width; i++)
// {
// if (P[i])
// fprintf(fp, "%10f ", P[i]);
// else
// fprintf(fp, "%9f ", P[i]);
//
// if ((i + 1) % Width == 0)
// fprintf(fp, "\n");
// }
// fclose(fp);
// }
//
//
// //printf("\n Matrix multiplication completed !!\n\n");
// free(M);
// free(N);
// free(P);
//
//}
//
//
//////////////////////////////////////////////////////////////////////////
////
////
//////////////////////////////////////////////////////////////////////////
//
//
//void fill(float* h, int w)
//{
//
// unsigned int i, num;
// int divide;
// FILE* f;
//
// f = fopen("/dev/urandom", "r");
// if (!f) {
// fprintf(stderr, "Failed open file\n");
// exit(1);
// }
// for (i = 0; i < w * w; i++)
// {
// fread(&num, sizeof(unsigned int), 1, f);
// fread(&divide, sizeof(int), 1, f);
// h[i] = ((float)num) / ((float)divide);
// //scanf("%f",&h[i]);
// }
// fclose(f);
// /*
// unsigned int i;
// srand((unsigned int)time(NULL));
// for(i=0; i< w*w; i++)
// {
// h[i] = ((float)rand()/(float)(RAND_MAX)) * 99;
// //scanf("%f",&h[i]);
// }
//
// */
//
//}
//
//////////////////////////////////////////////////////////////////////////
////
////
//////////////////////////////////////////////////////////////////////////
//
//void test_invert(int n)
//{
//
// //printf("Enter the order of the square matrix :");
// //scanf("%d",&n);
// const int mybatch = 1;
//
//
// //float* mat1[n * n];
// float mat1_size = sizeof(float) * n * n;
// float* mat1 = (float*)malloc(mat1_size);
//
// fill(mat1, n);
//
// float* result_flat = (float*)malloc(mybatch * n * n * sizeof(float));
// float** results = (float**)malloc(mybatch * sizeof(float*));
//
// for (int i = 0; i < mybatch; i++)
// results[i] = result_flat + (i * n * n);
//
// float** inputs = (float**)malloc(mybatch * sizeof(float*));
//
// //inputs[0] = zero_pivot;
//
// inputs[0] = mat1;
//
//
// invert(inputs, results, n, mybatch);
//
// if (n < 11)
// {
//
// for (int qq = 0; qq < mybatch; qq++)
// {
// if (mybatch == 1)
// fprintf(stdout, "Input Matrix, M :\n\n");
// else
// fprintf(stdout, "Input Matrix %d:\n\n", qq);
//
// for (int i = 0; i < n; i++)
// {
// for (int j = 0; j < n; j++)
// {
// if (inputs[qq][i * n + j])
// fprintf(stdout, "%12f ", inputs[qq][i * n + j]);
// else
// fprintf(stdout, "%11f ", inputs[qq][i * n + j]);
// }
// fprintf(stdout, "\n");
// }
// }
// fprintf(stdout, "\n\n");
//
//
//
//
// for (int qq = 0; qq < mybatch; qq++)
// {
//
// if (mybatch == 1)
// fprintf(stdout, "Inverse of the Input Matrix, Inv(M):\n\n");
// else
// fprintf(stdout, "Inverse Matrix %d:\n\n", qq);
// for (int i = 0; i < n; i++)
// {
// for (int j = 0; j < n; j++)
// {
// if (results[qq][i * n + j])
// fprintf(stdout, "%10f ", results[qq][i * n + j]);
// else
// fprintf(stdout, "%9f ", results[qq][i * n + j]);
//
// }
// fprintf(stdout, "\n");
// }
// }
// }
//
//
// else // order of the matrix is more than 10 x 10 then output the results in the file
// {
// printf("\nThe order of matrix is too large to display in terminal\n, Please open the file : Mat_Inv_out.txt located in the current folder. To see the output.\n\n");
//
// FILE* fp;
//
//
// fp = fopen("Mat_Inv_out", "w");
//
// if (!fp)
// {
// fprintf(stderr, "Failed to open Mat_Inv_out.\n");
// exit(1);
// }
//
//
//
// for (int qq = 0; qq < mybatch; qq++)
// {
//
// if (mybatch == 1)
// fprintf(fp, "Input Matrix , M:\n\n");
// else
// fprintf(fp, "Input Matrix %d:\n\n", qq);
//
//
//
//
// for (int i = 0; i < n; i++)
// {
// for (int j = 0; j < n; j++)
// {
// if (inputs[qq][i * n + j])
// fprintf(fp, "%12f ", inputs[qq][i * n + j]);
// else
// fprintf(fp, "%11f ", inputs[qq][i * n + j]);
// }
//
// fprintf(fp, "\n");
// }
// }
// fprintf(fp, "\n\n");
//
// for (int qq = 0; qq < mybatch; qq++)
// {
// if (mybatch == 1)
// fprintf(fp, "Inverse of the Input Matrix, Inv(M):\n\n");
// else
// fprintf(fp, "Inverse %d:\n\n", qq);
// for (int i = 0; i < n; i++)
// {
// for (int j = 0; j < n; j++)
// {
// if (results[qq][i * n + j])
// fprintf(fp, "%10f ", results[qq][i * n + j]);
// else
// fprintf(fp, "%9f ", results[qq][i * n + j]);
//
// }
//
// fprintf(fp, "\n");
// }
// }
//
// fclose(fp);
//
// }// end of if else condition for output
//
// float* A, * B;
//
// A = inputs[0];
// B = results[0];
// mul(A, B, n);
//
// //mul(inputs[0][], results[0][], n );
//
//}
//
//////////////////////////////////////////////////////////////////////////
////
////
//////////////////////////////////////////////////////////////////////////
//
//int main(int argc, char* argv[])
//{
// if (argc != 2)
// {
// printf("Usage: %s <matrix_width>\n", argv[0]);
// return 0;
// }
//
// int w;
// w = atoi(argv[1]);
//
// test_invert(w);
// return 0;
//}
//#include <stdio.h>
//#include <stdlib.h>
//#include <cublas_v2.h>
////#include<cublas.h>
//#include<math.h>
//#include<cuda_runtime.h>
//
//int main()
//{
// cublasHandle_t handle;
//
// cublasCreate(&handle);
//
//
// int size = 50; //к
// int num = 100;//ľ
// int* info;//ڼ¼LUֽǷɹ
// int* pivo;//ڼ¼LUֽϢ
// cudaMalloc((void**)&info, sizeof(int) * num);
// cudaMalloc((void**)&pivo, sizeof(int) * size * num);
// float** mat = new float* [num];//ľ
// float** invMat = new float* [num];//ľ
// for (int i = 0; i < num; i++) {
// cudaMalloc((void**)&mat[i], sizeof(float) * size * size);
// cudaMalloc((void**)&invMat[i], sizeof(float) * size * size);
// /*
// ォmat[i],ڴŵ
// */
// }
//
// float** gpuMat;
// cudaMalloc((void**)&gpuMat, sizeof(float*) * num);
// cudaMemcpy(gpuMat, mat, sizeof(float*) * num, cudaMemcpyHostToDevice);
// //Ŀǰhostϵfloat ** ָתΪ deviceϵ float ** ָ
//
// cublasSgetrfBatched(handle, size, gpuMat, size, pivo, info, num);//ĸǾάڴеĴģsize
//
//
// const float** constMat;
// cudaMalloc((void**)&constMat, sizeof(float*) * num);
// cudaMemcpy(constMat, gpuMat, sizeof(float*) * num, cudaMemcpyDeviceToDevice);
// //Ŀǰ float ** ָתΪ float *[]ָ
//
// float** gpuInvMat;
// cudaMalloc((void**)&gpuInvMat, sizeof(float*) * num);
// cudaMemcpy(gpuInvMat, invMat, sizeof(float*) * num, cudaMemcpyHostToDevice);
//
// //Ŀǰhostϵfloat ** ָתΪ deviceϵ float ** ָ
//
// cublasSgetriBatched(handle, size, constMat, size, pivo, gpuInvMat, size, info, num);
//
// cudaFree(info);
// cudaFree(pivo);
// cudaFree(mat);
// cudaFree(gpuMat);
// cudaFree(gpuInvMat);
// cudaFree(constMat);
//}
|
6,286 | #include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
#include <string>
#define THREADBLOCK_SIZE 128
#define WORKING_SET_SIZE_ELEM_BITS 21
#define WORKING_SET_SIZE_ELEMS (1 << WORKING_SET_SIZE_ELEM_BITS)
#define ITERATION_COUNT 5
// FNV-1a released into public domain
#define INITIAL_HASH 14695981039346656037ULL
// One FNV-1a mixing step: fold `val` into the running 64-bit hash.
// 1099511628211 is the 64-bit FNV prime; the algorithm is public domain.
__forceinline__ __device__ unsigned long long hash(unsigned long long current_hash, int val)
{
	unsigned long long folded = current_hash ^ (unsigned long long)val;
	return folded * 1099511628211ULL;
}
// Micro-benchmark kernel: performs ITERATION_COUNT pseudo-random reads (and,
// when `write` is true, also writes) into a 2^WORKING_SET_SIZE_ELEM_BITS
// element buffer. `coalescing_mask` controls how many neighboring threads
// share the same hash seed, and therefore how coalesced the accesses are.
template<bool write>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void
access_random(float * in_buf, int coalescing_mask)
{
	int elem_id = blockIdx.x * THREADBLOCK_SIZE + threadIdx.x;
	// Threads whose ids agree under the mask derive their addresses from the
	// same base id, so they access a contiguous cluster of elements.
	int base_read_elem_id = elem_id & coalescing_mask;
	// This thread's offset within its cluster (zero or negative).
	int thread_local_id = base_read_elem_id - elem_id;
	float res = 0.0F;
	unsigned long long current_hash = INITIAL_HASH;
	for(int i = 0; i < ITERATION_COUNT; ++i)
	{
		current_hash = hash(current_hash, base_read_elem_id + i);
		// Scatter the access across the working set; the final mask keeps the
		// index inside the buffer. The *8 factor spreads cluster members over
		// neighboring elements of the hashed base address.
		int offset = (current_hash + thread_local_id * 8) & ((1 << WORKING_SET_SIZE_ELEM_BITS) - 1);
		res += __ldg(in_buf + offset); // force a read-only-cache load
		if (write)
			in_buf[offset] = 1.0F;
	}
	// Data-dependent, practically-never-true guard: keeps the compiler from
	// optimizing the loads away in read-only mode.
	if (elem_id == res)
		in_buf[elem_id & ((1 << WORKING_SET_SIZE_ELEM_BITS) - 1)] = res;
}
// Entry point for the random-access bandwidth micro-benchmark.
//
// Usage: prog [coalescing_elems] [read|write] [threadblock_count]
//   argv[1]: count of consecutive thread ids sharing one hash base; converted
//            to a mask (meaningful when a power of two)
//   argv[2]: access mode, "read" (default) or "write"
//   argv[3]: number of threadblocks to launch
//
// Fix over original: CUDA API results and kernel launch/execution errors are
// now checked and reported instead of being silently ignored.
int main(int argc, char *argv[])
{
	float * d_buffer_in;
	int coalescing_mask = ~0;
	int threadblock_count = 65536 * 4;
	std::string mode = "read";
	if (argc > 1)
		coalescing_mask = ~(atol(argv[1]) - 1);
	if (argc > 2)
		mode = argv[2];
	if (argc > 3)
		threadblock_count = atol(argv[3]);
	std::cout << "coalescing mask = " << coalescing_mask << ", threadblock_count = " << threadblock_count
		<< ", mode = " << mode << std::endl;
	cudaError_t err = cudaMalloc((void **)&d_buffer_in, sizeof(float) * WORKING_SET_SIZE_ELEMS);
	if (err != cudaSuccess)
	{
		std::cout << "cudaMalloc failed: " << cudaGetErrorString(err) << std::endl;
		return 1;
	}
	if (mode == "read")
		access_random<false><<<threadblock_count, THREADBLOCK_SIZE>>>(d_buffer_in, coalescing_mask);
	else if (mode == "write")
		access_random<true><<<threadblock_count, THREADBLOCK_SIZE>>>(d_buffer_in, coalescing_mask);
	// Launch-configuration errors surface here; execution errors at the sync.
	err = cudaGetLastError();
	if (err != cudaSuccess)
		std::cout << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
	err = cudaDeviceSynchronize();
	if (err != cudaSuccess)
		std::cout << "kernel execution failed: " << cudaGetErrorString(err) << std::endl;
	cudaFree(d_buffer_in);
	cudaDeviceReset();
	return 0;
}
|
6,287 | #ifdef __cplusplus
extern "C" {
#endif
// Computes, for every training example, the squared Euclidean distance between
// `data` and that example (a k-NN style distance pass).
// One thread per training example; expects a 1-D launch with at least
// `setSize` threads (guarded below). Results go to res[0..setSize).
__global__ void kernel_compute(int* trainingSet, int* data, int* res, int setSize, int dataSize){
	int diff, toAdd, computeId;
	computeId = blockIdx.x * blockDim.x + threadIdx.x;
	//__shared__ int set[784];
	if(computeId < setSize){
		diff = 0;
		for(int i = 0; i < dataSize; i++){
			// NOTE(review): the row stride is hard-coded to 784 while the loop
			// bound is dataSize -- confirm dataSize == 784 (e.g. 28x28 MNIST)
			// at every call site; otherwise rows are misindexed.
			toAdd = data[i] - trainingSet[computeId*784 + i];
			diff += toAdd * toAdd;
		}
		// Squared distance (no sqrt) -- sufficient for ranking neighbors.
		res[computeId] = diff;
	}
}
#ifdef __cplusplus
}
#endif
|
6,288 | #include <iostream>
#include <iomanip>
#include <cstdio>
using namespace std;
const int d = 8;
const int w = 4;
// Tiled matrix product using w x w shared-memory tiles.
// Expected launch geometry: gridDim = (d/w, d/w), blockDim = (w, w); `d` must
// be a multiple of w (there are no bounds guards).
// Note: the second tile is loaded from rows bdy*w.. and indexed [tdy][tk], so
// the accumulation multiplies d_m by its own transpose.
// NOTE(review): parameter `d` shadows the file-scope constant `d`.
//
// Fix over original: removed the device printf in the inner loop -- a debug
// leftover that serializes every thread and floods stdout.
template <class T>
__global__ void runMaxtrix(T *d_m, T *d_mout, int d){
	__shared__ T b_mr[w][w];
	__shared__ T b_mc[w][w];
	int bdx = blockIdx.x;
	int bdy = blockIdx.y;
	int tdx = threadIdx.x;
	int tdy = threadIdx.y;
	int row = bdx * blockDim.x + tdx;
	int col = bdy * blockDim.y + tdy;
	int pos = row*d+col;
	T mval = 0;
	for (int bk = 0; bk < d/w; bk++){
		b_mr[tdx][tdy] = d_m[(bdx*w + tdx)*d + bk * w + tdy]; // (bdx*w+tdx, bk*w+tdy)
		b_mc[tdx][tdy] = d_m[(bdy*w + tdx)*d + bk * w + tdy]; // (bdy*w+tdx, bk*w+tdy)
		__syncthreads(); // tiles fully loaded before anyone reads them
		for (int tk = 0; tk < w; tk++){
			mval += b_mr[tdx][tk] * b_mc[tdy][tk];
		}
		__syncthreads(); // protect tiles before the next iteration overwrites them
	}
	d_mout[pos] = mval;
}
// Print a d x d matrix to stdout in C initializer-list style, with each
// entry right-aligned in a 5-character field.
template <class T> void diplayMatrix(T *m, int d){
	cout << "m={" << endl;
	for (int r = 0; r < d; r++){
		cout << "\t{";
		for (int c = 0; c < d; c++){
			cout << setw(5) << m[r*d + c];
			if (c < d - 1)
				cout << ", ";
		}
		// Last row closes without a trailing comma.
		cout << ((r == d-1) ? "}" : "},") << endl;
	}
	cout << "};" << endl;
}
// Builds an 8x8 test matrix, runs the tiled product kernel, and prints both
// the input and the result.
// Fix over original: CUDA allocation and launch errors are now checked.
int main(int argc, char *argv[]){
	int h_m[d*d];
	int h_mout[d*d];
	// Fill with a row*100+col pattern so entries are easy to eyeball.
	for (int i = 0; i < d; i++){
		for (int j = 0; j < d; j++){
			h_m[i*d + j] = i * 100 + j;
		}
	}
	diplayMatrix(h_m, d);
	int *d_m, *d_mout;
	if (cudaMalloc((void **)&d_m, d*d*sizeof(int)) != cudaSuccess ||
	    cudaMalloc((void **)&d_mout, d*d*sizeof(int)) != cudaSuccess){
		cout << "cudaMalloc failed" << endl;
		return 1;
	}
	cudaMemcpy(d_m, h_m, d*d*sizeof(int), cudaMemcpyHostToDevice);
	// 2x2 blocks of 4x4 threads cover the 8x8 matrix exactly (d/w = 2).
	runMaxtrix<<<dim3(2,2,1), dim3(4,4,1)>>>(d_m, d_mout, d);
	cudaError_t err = cudaGetLastError(); // catch launch-configuration errors
	if (err != cudaSuccess)
		cout << "kernel launch failed: " << cudaGetErrorString(err) << endl;
	cudaDeviceSynchronize();
	cudaMemcpy(h_mout, d_mout, d*d*sizeof(int), cudaMemcpyDeviceToHost);
	diplayMatrix(h_mout, d);
	cudaFree(d_m);
	cudaFree(d_mout);
	return 0;
}
|
6,289 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Caffe copyright:
COPYRIGHT
All contributions by the University of California:
Copyright (c) 2014-2017 The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014-2017, the respective contributors
All rights reserved.
Caffe uses a shared copyright model: each contributor holds copyright over
their contributions to Caffe. The project versioning records all such
contribution and copyright details. If a contributor wants to further mark
their specific copyright on a particular contribution, they should indicate
their copyright solely in the commit message of the change when it is
committed.
LICENSE
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
// Max-pooling forward pass (adapted from Caffe), NCHW layout.
// One thread per output element; expects a 1-D launch with >= nthreads total
// threads, where nthreads = num * channels * pooled_height * pooled_width.
// For each output cell the kernel scans its (padded, then clipped) pooling
// window and records the maximum value and its flat h*width+w index within
// the channel slice. `top_data` (maxima) and `mask` (argmax indices used by
// the backward pass) are each optional: only written when non-NULL.
__global__ void max_pool_fwd_f32_kernel(
    const uint32_t nthreads,
    const float* const bottom_data,
    const int num,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    float* const top_data,
    int32_t* const mask)
{
  uint32_t index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < nthreads) {
    // Decompose the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    const int hend = min(hstart + kernel_h, height);
    const int wend = min(wstart + kernel_w, width);
    // Clip the window to the valid input region; padding never wins the max.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    float maxval = -CUDART_INF_F;
    int maxidx = -1; // stays -1 only if the clipped window is empty
    const float* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        if (bottom_slice[h * width + w] > maxval) {
          maxidx = h * width + w;
          maxval = bottom_slice[maxidx];
        }
      }
    }
    if (NULL != top_data) {
      top_data[index] = maxval;
    }
    if (NULL != mask) {
      mask[index] = maxidx;
    }
  }
}
// Host launcher for max-pooling forward: one thread per pooled output value,
// launched on `stream` with 1024-thread blocks.
extern "C" void arraydiff_cuda_kernel_max_pool_fwd_f32(
    size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz,
    size_t y_w, size_t y_h,
    size_t kernel_w, size_t kernel_h,
    size_t stride_w, size_t stride_h,
    size_t pad_w, size_t pad_h,
    const float *x,
    float *maybe_y,
    int32_t *maybe_mask,
    cudaStream_t stream)
{
  const uint32_t block_size = 1024;
  const uint32_t work_items = y_w * y_h * chan_dim * batch_sz;
  const uint32_t num_blocks = (work_items + block_size - 1) / block_size; // ceil-div
  max_pool_fwd_f32_kernel<<<num_blocks, block_size, 0, stream>>>(
      work_items, x,
      batch_sz, chan_dim, x_h, x_w,
      y_h, y_w,
      kernel_h, kernel_w,
      stride_h, stride_w,
      pad_h, pad_w,
      maybe_y, maybe_mask);
}
// Max-pooling backward pass (adapted from Caffe), NCHW layout.
// One thread per INPUT element (nthreads = num * channels * height * width).
// Each input position accumulates the upstream gradient from every pooling
// window whose recorded argmax (`mask`, produced by the forward pass) equals
// that position. Gradients are ADDED into bottom_diff, so the caller must
// zero-initialize it.
__global__ void max_pool_bwd_f32_kernel(
    const uint32_t nthreads,
    const float* const top_diff,
    const int32_t* const mask,
    const int num,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    float* const bottom_diff)
{
  uint32_t index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of output windows [phstart,phend) x [pwstart,pwend) that could
    // contain input position (h, w).
    const int phstart =
        (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
    const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
    const int pwstart =
        (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
    const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
    float gradient = 0.0f;
    const int offset = (n * channels + c) * pooled_height * pooled_width;
    const float* const top_diff_slice = top_diff + offset;
    const int* const mask_slice = mask + offset;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Only windows whose max came from (h, w) contribute gradient here.
        if (mask_slice[ph * pooled_width + pw] == h * width + w) {
          gradient += top_diff_slice[ph * pooled_width + pw];
        }
      }
    }
    bottom_diff[index] += gradient;
  }
}
// Host launcher for max-pooling backward: one thread per INPUT element,
// launched on `stream` with 1024-thread blocks.
extern "C" void arraydiff_cuda_kernel_max_pool_bwd_f32(
    size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz,
    size_t y_w, size_t y_h,
    size_t kernel_w, size_t kernel_h,
    size_t stride_w, size_t stride_h,
    size_t pad_w, size_t pad_h,
    const float *dy,
    const int32_t *mask,
    float *dx,
    cudaStream_t stream)
{
  const uint32_t block_size = 1024;
  const uint32_t work_items = x_w * x_h * chan_dim * batch_sz;
  const uint32_t num_blocks = (work_items + block_size - 1) / block_size; // ceil-div
  max_pool_bwd_f32_kernel<<<num_blocks, block_size, 0, stream>>>(
      work_items, dy, mask,
      batch_sz, chan_dim, x_h, x_w,
      y_h, y_w,
      kernel_h, kernel_w,
      stride_h, stride_w,
      pad_h, pad_w,
      dx);
}
// Average-pooling forward pass (adapted from Caffe), NCHW layout.
// One thread per output element (nthreads = num * channels * pooled_height *
// pooled_width). The divisor `pool_size` is computed over the padded window
// extent BEFORE clipping to the image, i.e. padded cells count toward the
// average as zeros (Caffe's include-padding convention).
__global__ void avg_pool_fwd_f32_kernel(
    const uint32_t nthreads,
    const float* const bottom_data,
    const int num,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    float* const top_data)
{
  uint32_t index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < nthreads) {
    // Decompose the flat output index into (n, c, ph, pw).
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    // Divisor fixed over the padded extent, before clipping to the image.
    const int pool_size = (hend - hstart) * (wend - wstart);
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    float aveval = 0.0f;
    const float* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[h * width + w];
      }
    }
    top_data[index] = aveval / pool_size;
  }
}
// Host launcher for average-pooling forward: one thread per pooled output
// value, launched on `stream` with 1024-thread blocks.
extern "C" void arraydiff_cuda_kernel_avg_pool_fwd_f32(
    size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz,
    size_t y_w, size_t y_h,
    size_t kernel_w, size_t kernel_h,
    size_t stride_w, size_t stride_h,
    size_t pad_w, size_t pad_h,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  const uint32_t block_size = 1024;
  const uint32_t work_items = y_w * y_h * chan_dim * batch_sz;
  const uint32_t num_blocks = (work_items + block_size - 1) / block_size; // ceil-div
  avg_pool_fwd_f32_kernel<<<num_blocks, block_size, 0, stream>>>(
      work_items, x,
      batch_sz, chan_dim, x_h, x_w,
      y_h, y_w,
      kernel_h, kernel_w,
      stride_h, stride_w,
      pad_h, pad_w,
      y);
}
// Average-pooling backward pass (adapted from Caffe), NCHW layout.
// One thread per INPUT element (nthreads = num * channels * height * width).
// Each input position receives, from every output window covering it, that
// window's upstream gradient divided by the window's (padded) pool size --
// matching the forward pass's include-padding divisor. Gradients are ADDED
// into bottom_diff, so the caller must zero-initialize it.
__global__ void avg_pool_bwd_f32_kernel(
    const uint32_t nthreads,
    const float* const top_diff,
    const int num,
    const int channels,
    const int height,
    const int width,
    const int pooled_height,
    const int pooled_width,
    const int kernel_h,
    const int kernel_w,
    const int stride_h,
    const int stride_w,
    const int pad_h,
    const int pad_w,
    float* const bottom_diff)
{
  uint32_t index = threadIdx.x + blockDim.x * blockIdx.x;
  if (index < nthreads) {
    // find out the local index
    // find out the local offset
    // (h, w) are shifted into padded coordinates to simplify window math.
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of output windows that cover padded position (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    float gradient = 0.0f;
    const float* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
      }
    }
    bottom_diff[index] += gradient;
  }
}
// Host launcher for average-pooling backward: one thread per INPUT element,
// launched on `stream` with 1024-thread blocks.
extern "C" void arraydiff_cuda_kernel_avg_pool_bwd_f32(
    size_t x_w, size_t x_h, size_t chan_dim, size_t batch_sz,
    size_t y_w, size_t y_h,
    size_t kernel_w, size_t kernel_h,
    size_t stride_w, size_t stride_h,
    size_t pad_w, size_t pad_h,
    const float *dy,
    float *dx,
    cudaStream_t stream)
{
  const uint32_t block_size = 1024;
  const uint32_t work_items = x_w * x_h * chan_dim * batch_sz;
  const uint32_t num_blocks = (work_items + block_size - 1) / block_size; // ceil-div
  avg_pool_bwd_f32_kernel<<<num_blocks, block_size, 0, stream>>>(
      work_items, dy,
      batch_sz, chan_dim, x_h, x_w,
      y_h, y_w,
      kernel_h, kernel_w,
      stride_h, stride_w,
      pad_h, pad_w,
      dx);
}
|
6,290 | //pass: checka um retorno do tipo "ponteiro pra função"
//--blockDim=1024 --gridDim=1 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//1024
typedef float(*funcType)(float*, unsigned int);
// Device helper: element tid of v scaled by 2.
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
	const float factor = 2.0f;
	return factor * v[tid];
}
// Device helper: element tid of v scaled by one half.
__device__ float divideByTwo(float *v, unsigned int tid)
{
	const float half_factor = 0.5f;
	return half_factor * v[tid];
}
// Selects the per-element transform: index 0 -> doubling, any other value
// -> halving. Returned as a device function pointer.
__device__ funcType grabFunction(int i) {
	//__requires(i != 0);
	//__ensures(__return_val_funptr(funcType) == divideByTwo);
	return (i == 0) ? multiplyByTwo : divideByTwo;
}
// Applies grabFunction(i)'s transform plus multiplyByTwo to each element of v:
// with i == 0 each element becomes 4*v[tid]; otherwise 0.5*v[tid] + 2*v[tid]
// = 2.5*v[tid]. 1-D launch, guarded by `size`.
//
// Fix over original: the result is stored to v[tid] (global index) instead of
// v[threadIdx.x]; the old code wrote block-local offsets, which is wrong for
// any launch with more than one block.
__global__ void foo(float *v, unsigned int size, int i)
{
	//__requires(i != 0);
	unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
	funcType f = grabFunction(i);
	if (tid < size)
	{
		float x = (*f)(v, tid);
		x += multiplyByTwo(v, tid);
		v[tid] = x;
	}
}
// Returns an unconstrained ("nondeterministic") int for verification tools
// (ESBMC/GPUVerify-style harnesses): reading the uninitialized local is
// intentional so the verifier treats the value as arbitrary. This is
// undefined behavior under a normal compiler -- verification use only.
int nondet_int(){
	int a;
	return a;
}
// Verification harness: fills a vector with small random values, runs `foo`
// with a nondeterministic selector c, and asserts each output element is
// 4*v[i] (c == 0) or 2.5*v[i] (c != 0).
// Fixes over original: removed the unused `funcType fun` local and added
// checks on host/device allocations.
int main(){
	int c = 0; /* selects the transform: 0 -> multiply by 4, otherwise -> multiply by 2.5 */
	float* v;
	float* a;
	float* dev_v;
	printf("Digite 0 para multiplicar um vetor por 4 ou\nDigite outro valor para multiplicar um vetor por 2.5: \n");
	c = nondet_int(); //scanf("%u", &c);
	v = (float*)malloc(N*sizeof(float));
	a = (float*)malloc(N*sizeof(float));
	if (!v || !a) {
		fprintf(stderr, "malloc failed\n");
		return 1;
	}
	for (int i = 0; i < N; ++i){
		v[i] = rand() %10+1;
		printf(" %.1f; ", v[i]);
	}
	printf("\n");
	if (cudaMalloc((void**)&dev_v, N*sizeof(float)) != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed\n");
		return 1;
	}
	cudaMemcpy(dev_v, v, N*sizeof(float), cudaMemcpyHostToDevice);
	foo<<<1, N>>>(dev_v, N, c);
	//ESBMC_verify_kernel_fuintint (foo,1, N, dev_v, N, c);
	cudaMemcpy(a, dev_v, N*sizeof(float), cudaMemcpyDeviceToHost);
	for (int i = 0; i < N; ++i){
		printf(" %.1f; ", a[i]);
		if (c==0)
			assert(a[i]==4*v[i]);
		else
			assert(a[i]==2.5*v[i]);
	}
	free(v); free(a);
	cudaFree(dev_v);
	return 0;
}
|
6,291 | #import <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Print a message (plus newline) to stderr and terminate with failure status.
void error(char const *str)
{
	fputs(str, stderr);
	fputc('\n', stderr);
	exit(1);
}
// Report a CUDA error and abort. Fix over original: the function previously
// printed the error and returned, letting execution continue with a sticky
// CUDA error state and cascading failures; it now exits on failure, matching
// the sibling error() helper.
void cuda_check(cudaError_t err, char const *str)
{
	if (err != cudaSuccess) {
		fprintf(stderr, "%s: CUDA error %d (%s)\n",
			str, err, cudaGetErrorString(err));
		exit(1);
	}
}
// Componentwise sum of two float4 vectors; usable from host and device code.
__host__ __device__
float4 operator+(const float4 &a, const float4 &b)
{
	float4 s;
	s.x = a.x + b.x;
	s.y = a.y + b.y;
	s.z = a.z + b.z;
	s.w = a.w + b.w;
	return s;
}
// Initializes d_vec1[i] = i for i in [0, nels). 1-D launch.
// Fix over original: `nels` was accepted but never used, so any launch whose
// grid over-covered the buffer wrote out of bounds; the store is now guarded.
__global__
void init_vec(int nels, float* __restrict__ d_vec1)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i < nels)
		d_vec1[i] = i;
}
// Debug variant of multi_vec below: identical index arithmetic, but prints
// each operand pair before multiplying (device printf -- debugging only).
// NOTE(review): `nels` is accepted but unused; the guard uses r_res*c_res.
__global__
void multi_vec2(int nels,int n_row1,int n_col1,int n_row2,int n_col2,float* __restrict__ res_vec,float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int r_res,c_res;
	// The output is treated as r_res x c_res, one thread per element.
	r_res=n_row1;
	c_res=n_row2*n_col2;
	if(i<(r_res*c_res)){
		//int c= blockIdx.x*n_row1 + (threadIdx.x)%n_col1;
		// Map the flat output index to one element of each input. The double
		// modulo ((i%n_col1))%n_col1 is redundant (x%n is already < n for
		// non-negative x). NOTE(review): the matrix layout these formulas
		// assume is not evident from this file -- verify against the caller.
		int c= ((int)(i/c_res))*n_row1 + ((int)(i%n_col1))%n_col1;
		int j= ((int)(((int)(i%c_res))/n_row2) + (((int)(i%c_res))%n_row2)*n_col2);
		printf("%f %f\n",d_vec1[c],d_vec2[j] );
		res_vec[i]=d_vec1[c]*d_vec2[j];
	}
}
// Produces the element-wise partial products of a matrix multiplication:
// each output slot i gets d_vec1[c] * d_vec2[j]; reduction_row then sums
// runs of these partials into the final product entries. Identical index
// arithmetic to multi_vec2 but without the debug printf.
__global__
void multi_vec(int nels,int n_row1,int n_col1,int n_row2,int n_col2,float* __restrict__ res_vec,
float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int r_res,c_res;
// conceptual result layout: n_row1 rows, n_row2*n_col2 partials per row
r_res=n_row1;
c_res=n_row2*n_col2;
if(i<(r_res*c_res)){
// NOTE(review): ((i%n_col1))%n_col1 -- the second %n_col1 is redundant
int c= ((int)(i/c_res))*n_row1 + ((int)(i%n_col1))%n_col1;
int j= ((int)(((int)(i%c_res))/n_row2) + (((int)(i%c_res))%n_row2)*n_col2);
res_vec[i]=d_vec1[c]*d_vec2[j];
}
}
// Scale every element of d_vec by `scalar` into res_vec, one thread per
// element. NOTE(review): no bounds guard -- the launch configuration must
// cover exactly the vector length.
__global__
void scalareMatrice(float* __restrict__ res_vec,float scalar,float* __restrict__ d_vec)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float value = d_vec[idx];
    res_vec[idx] = scalar * value;
}
// Reduction step: each thread sums up to l_elem (4/8/12/16) consecutive
// floats of d_vec1, loaded as up-to-four float4 quads, and writes the scalar
// sum to res_vec[idx]. Repeated host-side until the data shrinks to one row.
// Uses the float4 operator+ defined above.
// NOTE(review): nquarts, elem and i0 are computed but never used; the
// __syncthreads() below appears unnecessary (no shared memory is involved)
// -- confirm before removing.
__global__
void reduction_row(int nels,int l_elem,float* res_vec, float* d_vec1)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// zero quad used when a lane's quota is below a quad boundary
const float4 noels = make_float4(0.0, 0.0, 0.0, 0.0);
const int nquarts = nels*4;
const int elem=nels/l_elem;
int i=idx*l_elem;
int i0 = i;
int i1 = i + 4;
int i2 = i + 2*4;
int i3 = i + 3*4;
__syncthreads();
// load first quad only if this thread owns at least 4 elements
float4 r0;
if(l_elem >= 4){
r0.x=d_vec1[i];
r0.y=d_vec1[i+1];
r0.z=d_vec1[i+2];
r0.w=d_vec1[i+3];
}
else r0= noels;
float4 r1;
if(l_elem >= 8){
r1.x=d_vec1[i1];
r1.y=d_vec1[i1+1];
r1.z=d_vec1[i1+2];
r1.w=d_vec1[i1+3];
}
else r1= noels;
float4 r2;
if(l_elem >= 12){
r2.x=d_vec1[i2];
r2.y=d_vec1[i2+1];
r2.z=d_vec1[i2+2];
r2.w=d_vec1[i2+3];
}
else r2= noels;
float4 r3;
if(l_elem >= 16){
r3.x=d_vec1[i3];
r3.y=d_vec1[i3+1];
r3.z=d_vec1[i3+2];
r3.w=d_vec1[i3+3];
}
else r3= noels;
// pairwise combine the four quads, then collapse to a scalar
float4 v = (r0 + r1) + (r2 + r3);
if (idx < nels)
res_vec[idx] = (v.x + v.y) + (v.z + v.w);
}
// Transpose an nrow x ncols row-major matrix from d_vec1 into res_vec
// (which becomes ncols x nrow row-major). One thread per element.
// BUG FIX: added a bounds guard -- the host rounds the grid up to a
// multiple of the block size, so tail threads previously read and wrote
// out of bounds.
__global__
void transpose(int nrow,int ncols, float* __restrict__ res_vec, float* __restrict__ d_vec1)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= nrow * ncols)
        return;
    int c = i % ncols;          // source column
    int r = i / ncols;          // source row
    int l_in  = r * ncols + c;  // == i; kept for readability
    int l_out = c * nrow + r;   // transposed position
    res_vec[l_out] = d_vec1[l_in];
}
// Element-wise sum: res_vec[i] = d_vec1[i] + d_vec2[i] for i < nels.
// BUG FIX: `nels` was previously unused; add the standard tail guard so
// over-provisioned grids do not write out of range.
__global__
void vecsum(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < nels)
        res_vec[i] = d_vec1[i] + d_vec2[i];
}
// Element-wise difference: res_vec[i] = d_vec1[i] - d_vec2[i] for i < nels.
// BUG FIX: `nels` was previously unused; add the standard tail guard so
// over-provisioned grids do not write out of range.
__global__
void vecdif(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < nels)
        res_vec[i] = d_vec1[i] - d_vec2[i];
}
// Print the first m entries of `matrice`, one per line, preceded by a
// blank line. (Removed the unused variable `j` and merged the two printf
// calls; output bytes are unchanged.)
void stampa(float* matrice, int m){
    printf("\n");
    for (int i = 0; i < m; i++){
        printf("%f \n", matrice[i]);
    }
}
int main(int argc, char *argv[]){
float* matriceA;
float* matriceB;
float* matriceX;
float* pk;
float* trasposta;
float* prodotto;
float* somma;
float* res;
float* den;
float* res0;
float* res1;
float* res2;
float* red_den;
float* matrice;
float* scalar;
float* num;
float* deno;
float ak;
int nels;
if (argc != 2) {
error("syntax: vecsum nels v");
}
int N = atoi(argv[1]);
if (N < 0) {
error("N < 0");
}
int M=1;
nels=N*N;
size_t memsize = nels*sizeof(float);
cudaError_t err;
err = cudaMalloc((void**)&matriceA, memsize);
cuda_check(err, "alloc matriceA");
err = cudaMalloc((void**)&matriceB, N*M*sizeof(float));
cuda_check(err, "alloc matriceB");
err = cudaMalloc((void**)&matriceX, N*sizeof(float));
cuda_check(err, "alloc matriceX");
err = cudaMallocHost(&matrice, N*N*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&num, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMallocHost(&deno, M*sizeof(float));
cuda_check(err, "alloc matrice");
err = cudaMalloc((void**)&somma,nels*M*sizeof(float));
cuda_check(err, "alloc somma");
err = cudaMalloc((void**)&res,M*N*N*sizeof(float));
cuda_check(err, "alloc res");
err = cudaMalloc((void**)&res0,N*M*N*sizeof(float));
cuda_check(err, "alloc res0");
err = cudaMalloc((void**)&prodotto,M*N*N*sizeof(float));
cuda_check(err, "alloc prodotto");
err = cudaMalloc((void**)&res1,M*N*sizeof(float));
cuda_check(err, "alloc res1");
err = cudaMalloc((void**)&res2,M*N*N*sizeof(float));
cuda_check(err, "alloc res2");
err = cudaMalloc((void**)&pk,M*N*sizeof(float));
cuda_check(err, "alloc pk");
err = cudaMalloc((void**)&trasposta,M*N*sizeof(float));
cuda_check(err, "alloc trasposta ");
err = cudaMalloc((void**)&den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&red_den,M*N*sizeof(float));
cuda_check(err, "alloc den");
err = cudaMalloc((void**)&scalar,M*N*sizeof(float));
cuda_check(err, "alloc scalar");
cudaEvent_t pre_init, post_init, pre_sum, post_sum,pre_prodotto,post_prodotto,
pre_transpose,post_transpose,pre_scalar_matrice,post_scalar_matrice,pre_vecsum,post_vecsum,
pre_vecdif,post_vecdif;
err = cudaEventCreate(&pre_init, 0);
cuda_check(err, "create pre_init");
err = cudaEventCreate(&pre_prodotto, 0);
cuda_check(err, "create pre_sum");
err = cudaEventCreate(&pre_transpose, 0);
cuda_check(err, "create pre_traspose");
err = cudaEventCreate(&pre_scalar_matrice, 0);
cuda_check(err, "create pre_scalar_matrice");
err = cudaEventCreate(&pre_vecdif, 0);
cuda_check(err, "create pre_vecdif");
err = cudaEventCreate(&pre_vecsum, 0);
cuda_check(err, "create pre_vecsum");
err = cudaEventCreate(&post_init, 0);
cuda_check(err, "create post_init");
err = cudaEventCreate(&post_prodotto, 0);
cuda_check(err, "create post_sum");
err = cudaEventCreate(&post_transpose, 0);
cuda_check(err, "create post_traspose");
err = cudaEventCreate(&post_scalar_matrice, 0);
cuda_check(err, "create post_scalar_matrice");
err = cudaEventCreate(&post_vecdif, 0);
cuda_check(err, "create post_vecdif");
err = cudaEventCreate(&post_vecsum, 0);
cuda_check(err, "create post_vecsum");
const int blockSize = 512;
int numBlocks = (nels + blockSize - 1)/blockSize;
cudaEventRecord(pre_init);
init_vec<<<blockSize,numBlocks>>>(nels, matriceA);
cudaEventRecord(post_init);
numBlocks = (M*N + blockSize - 1)/blockSize;
init_vec<<<blockSize, numBlocks>>>(M*N, matriceB);
init_vec<<<blockSize, numBlocks>>>(M*N, matriceX);
int i;
//calcolo i parametri della riduzione
int THREAD_LOAD=0;
float n = N;
while (n > 1) {
n/=4;
if(n==1){
THREAD_LOAD=4;
}
}
n = N;
while (n > 1) {
n/=8;
if(n==1){
THREAD_LOAD=8;
}
}
n=N;
while (n > 1) {
n/=12;
if(n==1){
THREAD_LOAD=12;
}
}
while (n > 1) {
n/=16;
if(n==1){
THREAD_LOAD=16;
}
}
if(THREAD_LOAD==0){
printf("Errore N deve essere una potenza di 4,8,12,16");
exit(0);
}
int j;
int c=N;
float* temp;
float runtime_red_ms;
int lr=0;
int log=N*N;
while(log>N){
++lr;
log=log/THREAD_LOAD;
}
cudaEvent_t pre_red[lr], post_red[lr];
//inizializzo gli eventi per la riduzione
for(i=0;i<lr;i++){
err = cudaEventCreate(&(pre_red[i]), 0);
cuda_check(err, "create pre_red");
err = cudaEventCreate(&(post_red[i]), 0);
cuda_check(err, "create post_red");
}
for(i=0;i<1;i++){
numBlocks = (nels + blockSize - 1)/blockSize;
cudaEventRecord(pre_prodotto);
multi_vec<<<blockSize, numBlocks>>>(nels*M,N,N,N,M,somma,matriceA,matriceX);
cudaEventRecord(post_prodotto);
c=N*N;
int nels_red=0;
int cont=0;
while(c>N){
c/=THREAD_LOAD;
nels_red+=c;
numBlocks = (c + blockSize - 1)/blockSize;
cudaEventRecord(pre_red[cont]);
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res0,somma);
cudaEventRecord(post_red[cont]);
err = cudaMemcpy(somma, res0, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
cont++;
}
printf("%d %d\n",lr,nels_red );
numBlocks = (N*M + blockSize - 1)/blockSize;
cudaEventRecord(pre_vecdif);
vecdif<<<blockSize, numBlocks>>>(N*M,pk,matriceB,res0);
cudaEventRecord(post_vecdif);
numBlocks = (N*N + blockSize - 1)/blockSize;
cudaEventRecord(pre_transpose);
transpose<<<blockSize, numBlocks>>>(N,M,trasposta,pk);
cudaEventRecord(post_transpose);
numBlocks = (M*N + blockSize - 1)/blockSize;
multi_vec<<<blockSize, numBlocks>>>(N*M,M,N,N,M,prodotto,trasposta,pk);
c=N;
while (c>1) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res1,prodotto);
err = cudaMemcpy(prodotto, res1, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
numBlocks = (M*N*N*M + blockSize - 1)/blockSize;
multi_vec<<<blockSize, numBlocks>>>(M*N*N*M,M,N,N,N,res,trasposta,matriceA);
c=N*N;
while (c>N) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,res2,res);
err = cudaMemcpy(res, res2, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
numBlocks = (N*N + blockSize - 1)/blockSize;
multi_vec<<<blockSize, numBlocks>>>(N*N,M,N,N,M,den,res2,pk);
c=N;
while (c>1) {
c/=THREAD_LOAD;
numBlocks = (c + blockSize - 1)/blockSize;
reduction_row<<<blockSize, numBlocks>>>(c,THREAD_LOAD,red_den,den);
err = cudaMemcpy(den, red_den, c*sizeof(float), cudaMemcpyDeviceToDevice);
cuda_check(err, "cpy");
}
err = cudaMemcpy(num, res1, 1*sizeof(float), cudaMemcpyDeviceToHost);
err = cudaMemcpy(deno, red_den, 1*sizeof(float), cudaMemcpyDeviceToHost);
ak=num[0]/deno[0];
printf("%f\n",ak );
numBlocks = (N + blockSize - 1)/blockSize;
cudaEventRecord(pre_scalar_matrice);
scalareMatrice<<<blockSize, numBlocks>>>(scalar,ak,pk);
cudaEventRecord(post_scalar_matrice);
numBlocks = (N*N + blockSize - 1)/blockSize;
cudaEventRecord(pre_vecsum);
vecsum<<<blockSize, numBlocks>>>(N*M*N,matriceX,matriceX,scalar);
cudaEventRecord(post_vecsum);
err = cudaMemcpy(matrice, matriceX, M*N*sizeof(float), cudaMemcpyDeviceToHost);
cuda_check(err, "create mem");
stampa(matrice,M*N);
float runtime_init_ms, runtime_prodotto_ms, runtime_red_ms,runtime_transpose_ms,runtime_scalar_matrice_ms,
runtime_vecdif_ms,runtime_vecsum_ms,runtime_red_count_ms;
err = cudaEventElapsedTime(&runtime_init_ms, pre_init, post_init);
cuda_check(err, "elapsed time init");
err = cudaEventElapsedTime(&runtime_prodotto_ms, pre_prodotto, post_prodotto);
cuda_check(err, "elapsed time prodotto");
runtime_red_count_ms=0;
for(j=0;j<lr;j++){
err = cudaEventElapsedTime(&runtime_red_ms, pre_red[j], post_red[j]);
cuda_check(err, "elapsed time reduction");
runtime_red_count_ms+=runtime_red_ms;
}
err = cudaEventElapsedTime(&runtime_transpose_ms, pre_transpose, post_transpose);
cuda_check(err, "elapsed time traspose");
err = cudaEventElapsedTime(&runtime_scalar_matrice_ms, pre_scalar_matrice, post_scalar_matrice);
cuda_check(err, "elapsed time scalar_matrice");
err = cudaEventElapsedTime(&runtime_vecdif_ms, pre_vecdif, post_vecdif);
cuda_check(err, "elapsed time vecdif");
err = cudaEventElapsedTime(&runtime_vecsum_ms, pre_vecsum, post_vecsum);
cuda_check(err, "elapsed time vecsum");
printf("init: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_init_ms, nels/runtime_init_ms/1.0e6, memsize/runtime_init_ms/1.0e6);
printf("prodotto: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_prodotto_ms, nels/runtime_prodotto_ms/1.0e6, memsize/runtime_prodotto_ms/1.0e6);
printf("reduction: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_red_count_ms, nels_red/runtime_red_count_ms/1.0e6, (nels_red*sizeof(float))/runtime_red_count_ms/1.0e6);
printf("transpose: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_transpose_ms, N/runtime_transpose_ms/1.0e6, (N*sizeof(float))/runtime_transpose_ms/1.0e6);
printf("scalareMatrice: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_scalar_matrice_ms, N/runtime_scalar_matrice_ms/1.0e6, (N*sizeof(float))/runtime_scalar_matrice_ms/1.0e6);
printf("vecdif: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecdif_ms, N/runtime_vecdif_ms/1.0e6, (N*sizeof(float))/runtime_vecdif_ms/1.0e6);
printf("vecsum: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
runtime_vecsum_ms, N/runtime_vecsum_ms/1.0e6, (N*sizeof(float))/runtime_vecsum_ms/1.0e6);
}
cudaFree(matriceA);
cudaFreeHost(matrice);
cudaFree(somma);
cudaFree(res);
cudaFree(pk);
cudaFree(trasposta);
cudaFree(prodotto);
cudaFree(den);
cudaFree(res0);
cudaFree(res1);
cudaFree(res2);
cudaFree(red_den);
cudaFree(scalar);
cudaFree(matriceB);
cudaFree(matriceX);
cudaFreeHost(num);
cudaFreeHost(deno);
}
|
6,292 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <iostream>
#define NUM_ELEMENTS 8192
// Non interleaved structure definition
typedef unsigned int ARRAY_MEMBER_T[NUM_ELEMENTS];
typedef struct {
ARRAY_MEMBER_T a;
ARRAY_MEMBER_T b;
ARRAY_MEMBER_T c;
ARRAY_MEMBER_T d;
} NON_INTERLEAVED_T;
// Multiply kernel
// Element-wise in-place multiply of the four member arrays of the
// non-interleaved struct: dest->{a,b,c,d}[tid] *= src->{a,b,c,d}[tid].
// One thread per index; tail threads of the last block do nothing.
__global__ void multiply_kernel(
NON_INTERLEAVED_T * const dest_ptr,
NON_INTERLEAVED_T * const src_ptr,
const unsigned int num_elements) {
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid >= num_elements)
        return;
    dest_ptr->a[tid] *= src_ptr->a[tid];
    dest_ptr->b[tid] *= src_ptr->b[tid];
    dest_ptr->c[tid] *= src_ptr->c[tid];
    dest_ptr->d[tid] *= src_ptr->d[tid];
}
// Benchmark: runs the same multiply twice, once fed from pageable host
// memory and once from pinned (page-locked) host memory, timing
// copy-in + kernel + copy-out for each variant.
int main(void)
{
    int bytes = sizeof(NON_INTERLEAVED_T);
    NON_INTERLEAVED_T *x, *y, *x_pin, *y_pin, *d_x, *d_y;
    const unsigned int num_threads = 256;
    const unsigned int num_blocks = (NUM_ELEMENTS + (num_threads-1)) / num_threads;
    // Events bracket each variant's transfers + kernel
    cudaEvent_t kernel_start, kernel_stop;
    cudaEvent_t kernel_start1, kernel_stop1;
    cudaEventCreate(&kernel_start,0);
    cudaEventCreate(&kernel_stop,0);
    cudaEventCreate(&kernel_start1,0);
    cudaEventCreate(&kernel_stop1,0);
    // Pageable host memory
    x = (NON_INTERLEAVED_T*)malloc(bytes);
    y = (NON_INTERLEAVED_T*)malloc(bytes);
    // Pinned host memory (eligible for faster DMA transfers)
    cudaMallocHost((void**)&x_pin, bytes);
    cudaMallocHost((void**)&y_pin, bytes);
    // Device memory
    cudaMalloc(&d_x, bytes);
    cudaMalloc(&d_y, bytes);
    // Fill every member array in all four host buffers
    // (the float values are truncated to unsigned int on assignment).
    float x_val = 3.0f;
    float y_val = 2.0f;
    for (int i = 0; i < NUM_ELEMENTS; i++) {
        x->a[i] = x_val;
        x->b[i] = x_val;
        x->c[i] = x_val;
        x->d[i] = x_val;
        y->a[i] = y_val;
        y->b[i] = y_val;
        y->c[i] = y_val;
        y->d[i] = y_val;
        x_pin->a[i] = x_val;
        x_pin->b[i] = x_val;
        x_pin->c[i] = x_val;
        x_pin->d[i] = x_val;
        y_pin->a[i] = y_val;
        y_pin->b[i] = y_val;
        y_pin->c[i] = y_val;
        y_pin->d[i] = y_val;
    }
    // --- pageable-memory variant ---
    cudaEventRecord(kernel_start, 0);
    cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, bytes, cudaMemcpyHostToDevice);
    multiply_kernel<<<num_blocks, num_threads>>>(d_y, d_x, NUM_ELEMENTS);
    cudaMemcpy(y, d_y, bytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    float delta = 0.0F;
    cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
    std::cout << "Pageable multiply took " << delta << std::endl;
    // --- pinned-memory variant ---
    cudaEventRecord(kernel_start1, 0);
    cudaMemcpy(d_x, x_pin, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y_pin, bytes, cudaMemcpyHostToDevice);
    multiply_kernel<<<num_blocks, num_threads>>>(d_y, d_x, NUM_ELEMENTS);
    cudaMemcpy(y_pin, d_y, bytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(kernel_stop1, 0);
    cudaEventSynchronize(kernel_stop1);
    float delta1 = 0.0F;
    cudaEventElapsedTime(&delta1, kernel_start1, kernel_stop1);
    std::cout << "Pinned multiply took " << delta1 << std::endl;
    // House keeping
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    cudaEventDestroy(kernel_start1);
    cudaEventDestroy(kernel_stop1);
    cudaFreeHost(x_pin);
    cudaFreeHost(y_pin);  // BUG FIX: original freed x_pin twice and leaked y_pin
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
}
|
6,293 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Naive matrix multiply using only threadIdx (blockIdx is unused):
// Pd[ty][tx] = dot(row ty of Md, column tx of Nd) for Width x Width
// row-major matrices.
// NOTE(review): because blockIdx is ignored, this is only correct when a
// single 2-D block of exactly Width x Width threads is launched; the 1-D
// <<<1,256>>> launch in main() leaves threadIdx.y == 0 and lets
// threadIdx.x exceed Width, causing out-of-bounds accesses.
__global__ void multiply(float* Md, float* Nd, float* Pd, int Width){
//int Row = blockIdx.y * blockDim.y + threadIdx.y;
//int Col = blockIdx.x * blockDim.x + threadIdx.x;
float Pvalue = 0;
for (int k = 0; k < Width; ++k){
Pvalue += Md[threadIdx.y*Width+k] * Nd[k*Width+threadIdx.x];
}
Pd[threadIdx.y*Width+threadIdx.x] = Pvalue;
}
// Times a 16x16 matrix multiply (A = all zeros, B = all ones) on the GPU.
int main(){
    float t = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // n x n matrices
    int n = 16;
    float A[(n * n)], B[(n * n)], C[(n * n)];
    int size = (n * n) * sizeof(float);
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);
    // BUG FIX: the original initialised only even indices, copying
    // uninitialised stack garbage to the device for the odd ones.
    for (int i = 0; i < (n * n); i++){
        A[i] = 0;
        B[i] = 1;
    }
    cudaMemcpy(d_a, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, size, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    // BUG FIX: multiply() indexes with threadIdx.y, so it needs an
    // n x n 2-D block. The original 1-D <<<1,256>>> launch left
    // threadIdx.y == 0 and let threadIdx.x run past Width (OOB).
    dim3 block(n, n);
    multiply<<<1, block>>> (d_a, d_b, d_c, n);
    cudaMemcpy(C, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_c);
    cudaFree(d_b);
    cudaFree(d_a);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize (stop);
    cudaEventElapsedTime(&t, start, stop);
    printf("Time: %.2f ms \n", t);
    return 0;
}
|
6,294 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define ThreadSize 16
// F = D * E where D is (N/2) x N and E is N x (N/2); one thread per
// output cell, with a guard for grid overshoot.
__global__ void MatMulKernel( int *dD, int *dE, int *dF, int N ) {
    const int row  = blockIdx.y * blockDim.y + threadIdx.y;
    const int col  = blockIdx.x * blockDim.x + threadIdx.x;
    const int half = N / 2;
    if (row >= half || col >= half)
        return;
    int acc = 0;
    for (int i = 0; i < N; i++)
        acc += dD[row * N + i] * dE[i * half + col];
    dF[row * half + col] = acc;
}
// Computes F = D * E on the GPU, where D is (N/2) x N, E is N x (N/2),
// and F is (N/2) x (N/2), timing allocation + copies + kernel.
// Returns 0 on completion. (Removed the unused locals i/j and large
// commented-out debug dumps.)
extern "C" int CUDA_stuff(int *D, int *E, int *F, int N )
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaError_t err;
    int *dD, *dE, *dF;
    // Allocate the memory on the GPU
    cudaEventRecord(start,0);
    err = cudaMalloc ( (void**) &dD, (N/2)*(N)*sizeof(int));
    err = cudaMalloc ((void**) &dE, (N/2)*(N)*sizeof(int));
    err = cudaMalloc ((void**) &dF, (N/2)*(N/2)*sizeof(int));
    // Copy the inputs to the GPU (F's upload is redundant but harmless;
    // the kernel overwrites every in-range element).
    err = cudaMemcpy(dD, D, (N/2)*(N)*sizeof(int), cudaMemcpyHostToDevice );
    err = cudaMemcpy(dE, E, (N/2)*(N)*sizeof(int), cudaMemcpyHostToDevice );
    err = cudaMemcpy(dF, F, (N/2)*(N/2)*sizeof(int), cudaMemcpyHostToDevice );
    // Launch: 16x16 thread blocks, grid rounded up to cover the matrix
    dim3 dimBlock(ThreadSize,ThreadSize,1);
    dim3 dimGrid(((N/ThreadSize)+1),((N/ThreadSize)+1),1);
    MatMulKernel <<< dimGrid, dimBlock >>> (dD, dE, dF, N);
    // Copy the result back (this blocking copy synchronizes with the kernel)
    err = cudaMemcpy ( F, dF, (N/2)*(N/2)*sizeof(int), cudaMemcpyDeviceToHost );
    (void)err; // error strings were printed here in debug builds
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float GPUelapsed;
    cudaEventElapsedTime(&GPUelapsed,start,stop);
    //printf("\n\nGPU Elapsed time:%f\n\n",GPUelapsed);
    // BUG FIX: the original leaked dD/dE/dF and both events on every call.
    cudaFree(dD);
    cudaFree(dE);
    cudaFree(dF);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf ( "Success!\n" );
    return(0);
}
|
6,295 | __global__
// One thread per element: result[i] = exp(l[i]) when l[i] > i, otherwise
// l[i] + r[i]. Rewritten without the goto ladder (idiomatic structured
// control flow), and guarded so launches larger than N stay in bounds
// (`N` was previously unused).
void vecAdd(float *l, float *r, float *result, size_t N) {
    size_t i = threadIdx.x;
    if (i >= N)
        return;
    if (l[i] > i) {
        result[i] = exp(l[i]);
    } else {
        result[i] = l[i] + r[i];
    }
}
|
6,296 | #include <iostream>
/// This is what the add.ptx is compiled from
/// "nvcc add.cu --ptx"
/// Grid-stride element-wise addition: out[i] = x[i] + y[i] for i < count.
/// Valid for any grid size, including a single block.
extern "C" __global__ void sum(const float* x, const float* y, float* out, int count) {
    int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < count) {
        out[i] = x[i] + y[i];
        i += stride;
    }
}
/// From this PDF: https://www.fz-juelich.de/SharedDocs/Downloads/IAS/JSC/EN/slides/cuda/05-cuda-mm.pdf?__blob=publicationFile
/// Also, see this similar guide: https://www.tutorialspoint.com/cuda/cuda_matrix_multiplication.htm
/// It's called like so:
/// mm_kernel<<<dimGrid, dimBlock>>> (d_a, d_b, d_c, n);
/// Naive. Can this be done using shared memory?
/// Also, this accesses global memory (A and B) twice per loop. See: https://www.tutorialspoint.com/cuda/cuda_performance_considerations.htm
/// Naive dense matrix multiply C = A * B for n x n row-major matrices.
/// One thread per output element; threads outside the matrix exit early.
/// Reads A and B from global memory every iteration (no tiling).
extern "C" __global__ void mm_kernel(float* A, float* B, float* C, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= n || col >= n)
        return;
    float acc = 0.0f;
    for (int k = 0; k < n; ++k)
        acc += A[row * n + k] * B[k * n + col];
    C[row * n + col] = acc;
}
/// From: https://stackoverflow.com/questions/18997773/non-square-matrix-multiplication-in-cuda
/// This works!
#define TILE_DIM 16
/// Tiled (but non-shared-memory) multiply of rectangular matrices:
/// C (CRows x CCols) = A (ARows x ACols) * B (BRows x BCols), row-major.
/// Each thread accumulates one output cell, walking the inner dimension in
/// TILE_DIM-sized chunks; the bounds test inside the inner loop handles
/// matrices whose sizes are not multiples of TILE_DIM.
extern "C" __global__ void mm_noshared(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) {
float CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
// ceil(ACols / TILE_DIM) chunks of the inner dimension
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
for (int n = 0; n < TILE_DIM; ++n)
// skip inner indices past the end of A's row / B's column
if ((k*TILE_DIM + n < ACols && Row < ARows) && (k*TILE_DIM + n < BRows && Col < BCols))
CValue += A[Row*ACols + k*TILE_DIM + n] * B[(k*TILE_DIM + n)*BCols + Col];
}
if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue;
}
/// From: https://stackoverflow.com/questions/18815489/cuda-tiled-matrix-matrix-multiplication-with-shared-memory-and-matrix-size-whic
/// This does use shared memory!!!
/// Untested.
// Shared-memory tiled multiply of rectangular matrices:
// C (CRows x CCols) = A (ARows x ACols) * B (BRows x BCols), row-major.
// Each TILE_DIM x TILE_DIM block stages one tile of A and one of B in
// shared memory per iteration; out-of-range tile slots are zero-filled so
// sizes need not be multiples of TILE_DIM. Requires blockDim ==
// (TILE_DIM, TILE_DIM). Marked untested by the original author.
__global__ void MatMul(float* A, float* B, float* C, int ARows, int ACols, int BRows,
int BCols, int CRows, int CCols)
{
float CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
// walk the inner dimension one tile at a time
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
// stage this thread's element of the A tile (0 when out of range)
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
// stage this thread's element of the B tile (0 when out of range)
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// barrier: all tile loads must finish before any thread reads them
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n)
CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// barrier: all reads must finish before the next iteration overwrites
__syncthreads();
}
if (Row < CRows && Col < CCols)
C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +
(blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
/// TEST -- NOT EXPORTED
/// CUDA kernel to add elements of two arrays
/// Grid-stride element-wise add: y[i] = x[i] + y[i] for i < n.
/// Correct for any launch configuration, including one block of one thread.
__global__ void add(int n, float *x, float *y) {
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step  = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step) {
        y[i] = x[i] + y[i];
    }
}
/// See: https://developer.nvidia.com/blog/even-easier-introduction-cuda/
/// Smoke test: multiply two 2x2 matrices held in unified memory.
/// See: https://developer.nvidia.com/blog/even-easier-introduction-cuda/
int main(void) {
    int N = 4;
    float *x, *y, *out;
    // Unified memory is accessible from both CPU and GPU
    // See: https://developer.nvidia.com/blog/unified-memory-cuda-beginners/
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    cudaMallocManaged(&out, N*sizeof(float));
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // N == 4 elements interpreted as a 2x2 matrix, hence n = 2
    dim3 dimBlock(16, 16);
    mm_kernel<<<1, dimBlock>>>(x, y, out, 2);
    // Wait for the GPU before touching `out` on the host
    cudaDeviceSynchronize();
    // Print out the matrix
    for (int i = 0; i < N; ++i) {
        std::cout << out[i] << " ";
    }
    // Free memory (BUG FIX: `out` was leaked in the original)
    cudaFree(x);
    cudaFree(y);
    cudaFree(out);
    return 0;
}
6,297 | #include "includes.h"
// Backward pass of a spatial log-softmax over the class dimension:
// gradInput = gradOutput - exp(output) * sum_c(gradOutput), computed per
// spatial position. One block per batch item; threads stride over the
// H*W positions. Reads gradOutput/output in B x H x W x C layout and
// writes gradInput in Torch's B x C x H x W layout.
__global__ void cunn_SpatialLogSoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int classSize, int height, int width)
{
int batchIndex = blockIdx.x;
int index = threadIdx.x;
while (index < height*width) {
int y = index / width;
int x = index % width;
// redundant with the loop condition, kept as a safety stop
if (y >= height)
break;
// calculate output starting index in cuda layout (B x H x W x C)
int outputStartIndex =
(height*width*classSize)*batchIndex +
(width*classSize)*y +
(classSize)*x;
// sum of upstream gradients over the class dimension at this pixel
float sum = 0;
for (int i = 0; i < classSize; i++) {
sum += gradOutput[outputStartIndex + i];
}
for (int i = 0; i < classSize; i++) {
// calculate input index in torch layout (B x C x H x W)
int inputIndex =
(classSize*height*width)*batchIndex +
(height*width)*i +
(width)*y +
x;
gradInput[inputIndex] = gradOutput[outputStartIndex + i] - __expf(output[outputStartIndex + i]) * sum;
}
index += blockDim.x;
}
} |
6,298 | // 20181010
// Yuqiong Li
// a basic CUDA function to familiarize with usage
#include<stdio.h>
#include<cuda.h>
// function declarations
__global__ void vecAddKernel(float * a, float * b, float * c, unsigned int N);
// main function
// Host driver: builds two N-element vectors, adds them on the GPU with
// vecAddKernel, and compares the device-side total against the host-side
// sum computed during initialisation.
int main()
{
    int N = 10;                              // length of vector
    float * a, * b, * c;                     // a and b are inputs, c the result
    unsigned int size = N * sizeof(float);   // number of bytes per vector
    a = (float *)calloc(N, sizeof(float));
    b = (float *)calloc(N, sizeof(float));
    int i = 0;
    float sum = 0;                           // host-side reference total
    for (i = 0; i < N; i++){
        a[i] = (float)i / 0.23 + 1;
        b[i] = (float)i / 5.89 + 9;
        sum += a[i] + b[i];
    }
    c = (float*) malloc(size);
    // 1. allocate device memory, checking each call
    float * d_a, * d_b, * d_c;
    cudaError_t err1 = cudaMalloc((void **) & d_a, size);
    cudaError_t err2 = cudaMalloc((void **) & d_b, size);
    cudaError_t err3 = cudaMalloc((void **) & d_c, size);
    if (err1 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err1), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (err2 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (err3 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy inputs to the device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // 2. launch: one thread per element, 256 threads per block
    vecAddKernel<<<ceil(N/256.0), 256>>>(d_a, d_b, d_c, N);
    // 3. copy the results back (blocking copy synchronizes with the kernel)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error!=cudaSuccess)
    {
        fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
        exit(-1);
    }
    float cuda_res = 0;
    for(i = 0; i < N; i++){
        printf("%.2f\t", c[i]);
        cuda_res += c[i];
    }
    printf("Results from host :%.2f\n", sum);
    printf("Results from device:%.2f\n", cuda_res);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUG FIX: release the host buffers (previously leaked)
    free(a);
    free(b);
    free(c);
    return 0;
}
// Element-wise vector add: c[i] = a[i] + b[i] for i < N, one thread per
// element with a tail guard for the last block.
__global__
void vecAddKernel(float * a, float * b, float * c, unsigned int N){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N)
        c[idx] = a[idx] + b[idx];
}
|
6,299 | #include "includes.h"
using namespace std;
#define N 32
// result = A * B for square n x n row-major matrices, one thread per
// output cell.
// FIXES: added a bounds guard (the original had none, so grids rounded
// up past n x n wrote out of bounds) and hoisted the store out of the
// loop (the original rewrote result[row*n+col] on every iteration).
__global__ void multSquareMatrix(int *A, int *B, int *result, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= n || col >= n)
        return;
    int sum = 0;
    for (int k = 0; k < n; k++) {
        sum += A[row * n + k] * B[k * n + col];
    }
    result[row * n + col] = sum;
}
6,300 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand_kernel.h>
#define N 100 // total number of items in vectors
#define nthreads 4 // total number of threads in a block
// Monte-Carlo estimator: each active thread (threadID < n) runs n trials,
// counting successes in a per-thread shared-memory slot; thread 0 of each
// block then totals the block's slots into sum[blockIdx.x].
// FIXES vs. original: (1) every thread now zeroes its counter slot
// unconditionally, so thread 0 never reads uninitialised shared memory
// when the block has inactive tail threads; (2) a __syncthreads() barrier
// was added before thread 0 sums the counters -- the original summation
// raced with the other threads' updates.
__global__ void estimatepi(int n, int *sum)
{
    __shared__ int counter[nthreads];
    int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int seed = threadID;           // deterministic per-thread seed
    curandState s;
    curand_init(seed, 0, 0, &s);
    counter[threadIdx.x] = 0;               // zero ALL slots (see fix 1)
    if (threadID < n) {
        double x, y, diff, angle;
        int t;
        for (t = 0; t < n; t++) {
            // draw a point inside the unit circle by rejection sampling
            x = curand_uniform(&s);
            y = curand_uniform(&s);
            while (x*x + y*y > 1) {
                x = curand_uniform(&s);
                y = curand_uniform(&s);
            }
            angle = atan2 ( y, x );         // use inverse tan
            diff = curand_uniform(&s);
            if (diff <= sin (angle) *2) {
                counter[threadIdx.x] = counter[threadIdx.x] + 1;
            }
        }
    }
    __syncthreads();                        // barrier before the read (fix 2)
    if (threadIdx.x == 0) {
        sum[blockIdx.x] = 0;
        for (int i = 0; i < nthreads; i++) {
            sum[blockIdx.x] = sum[blockIdx.x] + counter[i];
        }
    }
}
// Host driver for the Monte-Carlo estimator: launches estimatepi, gathers
// the per-block success counts, and prints the resulting pi estimate.
// NOTE(review): srand() seeds the host RNG, but randomness comes from
// device-side curand (seeded per thread) -- srand appears unused.
int main()
{
srand(time(NULL));
int *sum_h;
int *sum_d;
// one slot per block is actually used; N slots are allocated
sum_h = (int*)malloc( N* sizeof(int));
cudaMalloc((void**)&sum_d, N * sizeof(int));
int nblocks = (N + nthreads - 1)/nthreads;
estimatepi<<<nblocks,nthreads>>>(N,sum_d);
// blocking copy: synchronizes with the kernel before reading results
cudaMemcpy(sum_h, sum_d, N * sizeof(int), cudaMemcpyDeviceToHost);
int success = 0;
for(int i = 0; i < nblocks; i++){
success = sum_h[i] + success;
}
printf("trials === %d", N * nblocks * nthreads );
printf(" success === %d\n", success);
// estimate = 2 * total trials / successes
double pi_estimate = 2 * N * nthreads * nblocks/( double )success;
printf("pi_estimate == %f", pi_estimate);
printf("\n");
cudaFree(sum_d);
free(sum_h);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.