serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
5,901 |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Kernel of an auto-generated floating-point stress test: folds a fixed chain of
 * float operations into `comp` and prints it with full precision ("%.17g").
 * The exact expression order defines the reference output - do not reassociate.
 * var_8 must be device-accessible and hold at least var_1 floats
 * (assumes var_1 <= buffer length - TODO confirm against the launching host code). */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float* var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15) {
float tmp_1 = -0.0f;
// +0.0f / var_7 is 0 (or NaN when var_7 == 0); the ceilf argument then collapses accordingly
comp += tmp_1 + var_2 - var_3 / (var_4 - ceilf((+1.3768E34f - var_5 * var_6 * (+1.5851E-37f / (+0.0f / var_7)))));
// var_1 iterations; each overwrites comp, so only the last iteration's value is printed
for (int i=0; i < var_1; ++i) {
var_8[i] = var_9 - tanhf(var_10 / var_11 * -0.0f);
comp = var_8[i] - var_12 - var_13 + (+1.6909E-42f - var_14);
comp += atanf(var_15 * +1.3157E-42f);
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element host float buffer with every slot set to v.
 * The caller owns the returned memory (release with free()). */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
/* Parses 16 command-line values and launches the generated `compute` kernel.
 * Fixes over the original:
 *  - guards argc so argv[1..16] are never read past the end;
 *  - the kernel writes through its float* argument, so the host buffer from
 *    initPointer() is now mirrored into device memory (the original passed the
 *    raw host malloc pointer to the kernel, an illegal device dereference);
 *  - releases all allocations before exit. */
int main(int argc, char** argv) {
    /* Expect exactly 16 values: argv[1]..argv[16]. */
    if (argc < 17) {
        fprintf(stderr, "usage: %s v1 v2 ... v16\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    /* Host-side fill, then copy to a device buffer the kernel can legally write.
       NOTE(review): the kernel writes tmp_2 elements; this buffer holds 10,
       matching initPointer's size - confirm tmp_2 <= 10 in the test harness. */
    float* h_tmp_9 = initPointer( atof(argv[9]) );
    float* tmp_9 = NULL;
    cudaMalloc((void**)&tmp_9, sizeof(float)*10);
    cudaMemcpy(tmp_9, h_tmp_9, sizeof(float)*10, cudaMemcpyHostToDevice);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16);
    /* Kernel printf is only flushed once the device synchronizes. */
    cudaDeviceSynchronize();
    cudaFree(tmp_9);
    free(h_tmp_9);
    return 0;
}
|
5,902 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
/* Element-wise difference c[i] = a[i] - b[i] over n doubles.
 * Grid-stride loop: correct for any launch configuration, including grids
 * smaller than n. */
__global__ void kernelDeduct(double *a, double* b, double* c, size_t n) {
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += stride) {
        c[idx] = a[idx] - b[idx];
    }
}
/* Reads n and two n-element vectors from stdin, computes a - b on the GPU and
 * prints the result. Fixes over the original: %zu for size_t (the "%lu\n"
 * format is only correct where size_t == unsigned long), scanf/malloc results
 * checked, kernel launch errors picked up via cudaGetLastError, host buffers
 * freed, and error paths now return a non-zero exit code. */
int main() {
    size_t n;
    if (scanf("%zu", &n) != 1) {
        printf("ERROR: %s\n", "Failed to read the vector length");
        return 1;
    }
    double *a = (double *)malloc(sizeof(double)*n);
    double *b = (double *)malloc(sizeof(double)*n);
    double *c = (double *)malloc(sizeof(double)*n);
    if (a == NULL || b == NULL || c == NULL) {
        printf("ERROR: %s\n", "Can't allocate host memory");
        return 1;
    }
    for (size_t i = 0; i < n; i++)
        scanf("%lf", &a[i]);
    for (size_t i = 0; i < n; i++)
        scanf("%lf", &b[i]);
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "cudaSetDevice check failed. You must have at least one Nvidia GPU!");
        return 1;
    }
    double *va, *vb, *vc;
    cudaStatus = cudaMalloc((void**)&va, n*sizeof(double));
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't allocate video memory");
        return 1;
    }
    cudaStatus = cudaMalloc((void**)&vb, n*sizeof(double));
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't allocate video memory");
        return 1;
    }
    cudaStatus = cudaMalloc((void**)&vc, n*sizeof(double));
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't allocate video memory");
        return 1;
    }
    cudaStatus = cudaMemcpy(va, a, n*sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't copy from ram to videomemory");
        return 1;
    }
    cudaStatus = cudaMemcpy(vb, b, n*sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't copy from ram to videomemory");
        return 1;
    }
    kernelDeduct <<<16384, 512>>>(va, vb, vc, n);
    /* Kernel launches do not return errors directly; catch bad configs here. */
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", cudaGetErrorString(cudaStatus));
        return 1;
    }
    /* This blocking D2H copy also synchronizes with the kernel. */
    cudaStatus = cudaMemcpy(c, vc, n*sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        printf("ERROR: %s\n", "Can't copy from videomemory to ram");
        return 1;
    }
    cudaFree(va);
    cudaFree(vb);
    cudaFree(vc);
    for (size_t i = 0; i < n; i++)
        printf("%.10e ", c[i]);
    printf("\n");
    /* Release host buffers (leaked in the original). */
    free(a);
    free(b);
    free(c);
    return 0;
}
|
5,903 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include "cufft.h"
using namespace std;
#define TILE_X 16
#define TILE_Y 16
#define MARGIN 35
// Constant-memory storage for the wavelet filter coefficients
// (forward and inverse, low-pass and high-pass; up to MARGIN taps each).
__constant__ double c_lpFilter[MARGIN];
__constant__ double c_hpFilter[MARGIN];
__constant__ double c_iLpFilter[MARGIN];
__constant__ double c_iHpFilter[MARGIN];
//------------------------------------------------------------- Forward transform -------------------------------------------------------------
// Row-direction 1-D DWT over the 2-D data. lp_output/hp_output are written transposed
// so the subsequent column pass can also read its input row-wise.
// Row-direction 1-D DWT. Each thread computes one downsampled output coefficient
// (col) of one row (row). Borders are symmetrically mirrored. Results are written
// TRANSPOSED into lp_output/hp_output (output column index becomes row coordinate).
// data is pitched (pitch_data bytes per row); filter coefficients come from the
// constant-memory arrays c_lpFilter / c_hpFilter, filter_len taps each.
__global__ void Dwt1D_Row(double *data,int width,int height,size_t pitch_data,int filter_len,
double *lp_output,size_t pitch_lpout,double *hp_output,size_t pitch_hpout)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
int aftWidth=(width+filter_len-1)/2;// output width after border extension and downsampling by 2
if(col>=aftWidth||row>=height)
return;
double* row_data=(double*)((char*)data+row*pitch_data);// start of this input row (pitched)
double* row_lpout=(double*)((char*)lp_output+col*pitch_lpout);// transposed low-pass output row for this coefficient
double* row_hpout=(double*)((char*)hp_output+col*pitch_hpout);// transposed high-pass output row for this coefficient
int symIndex=filter_len+2*col;// position of this output in the symmetrically extended row
int oriIndex=symIndex-filter_len+1;// = 2*col+1: rightmost original sample touched by the filter
double lp_result=0;// low-pass accumulator
double hp_result=0;// high-pass accumulator
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double tmpValue;// input sample after symmetric border handling
if(index<0)
{
tmpValue=row_data[-index-1];// left mirror: index -1 reads 0, -2 reads 1, ... (|index|-1)
}
else
{
if(index>=width)
tmpValue=row_data[2*width-index-1];// right mirror around the last sample: (width-1)-(index-(width-1)-1)
else
tmpValue=row_data[index];
}
lp_result+=tmpValue*c_lpFilter[i];
hp_result+=tmpValue*c_hpFilter[i];
}
// store transposed so the column pass reads contiguously
row_lpout[row]=lp_result;
row_hpout[row]=hp_result;
}
// Column-direction 1-D DWT applied to the (already transposed) row-pass outputs.
// Both inputs have identical geometry, so one kernel produces the CLL approximation
// band plus the CLH/CHL/CHH detail bands; the detail bands are written straight into
// their final positions in the packed output buffer (located via `offset`).
// `width`/`height` here are the transposed dimensions of the row-pass outputs.
// Column-direction 1-D DWT over the transposed row-pass outputs. Each thread
// computes one coefficient of all four sub-bands: CLL goes to the pitched CLL
// buffer (again transposed, back to the original orientation), while CLH/CHL/CHH
// are stored column-major into `output` ending at index `offset`
// (offset = total_size - sum of the detail bands already emitted by finer levels).
__global__ void Dwt1D_Col(double *lpOutput,size_t pitch_lpout,double *hpOutput,size_t pitch_hpout,int width,int height,
int filter_len,double *CLL,size_t pitch_cll,double *output,int offset)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
int aftWidth=(width+filter_len-1)/2;// downsampled output width after border extension
if(col>=aftWidth||row>=height)
return;
double* row_lpData=(double*)((char*)lpOutput+row*pitch_lpout);// this row of the low-pass input
double* row_hpData=(double*)((char*)hpOutput+row*pitch_hpout);// this row of the high-pass input
double* row_cll=(double*)((char*)CLL+col*pitch_cll);// transposed CLL output row for this coefficient
int symIndex=filter_len+2*col;// position of this output in the symmetrically extended row
int oriIndex=symIndex-filter_len+1;// = 2*col+1: rightmost original sample touched by the filter
double cll_result=0;// low-low accumulator
double clh_result=0;// low-high accumulator
double chl_result=0;// high-low accumulator
double chh_result=0;// high-high accumulator
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double lpTmpValue;// sample from the low-pass input (mirrored at borders)
double hpTmpValue;// sample from the high-pass input (mirrored at borders)
if(index<0)
{
lpTmpValue=row_lpData[-index-1];// left mirror: |index|-1
hpTmpValue=row_hpData[-index-1];
}
else
{
if(index>=width)
{
lpTmpValue=row_lpData[2*width-index-1];// right mirror around the last sample
hpTmpValue=row_hpData[2*width-index-1];
}
else
{
lpTmpValue=row_lpData[index];
hpTmpValue=row_hpData[index];
}
}
cll_result+=lpTmpValue*c_lpFilter[i];
clh_result+=lpTmpValue*c_hpFilter[i];
chl_result+=hpTmpValue*c_lpFilter[i];
chh_result+=hpTmpValue*c_hpFilter[i];
}
// CLL is written transposed (pitched); the three detail bands are packed
// column-major into output, ending at `offset`
row_cll[row]=cll_result;
output[offset-3*aftWidth*height+col*height+row]=clh_result;
output[offset-2*aftWidth*height+col*height+row]=chl_result;
output[offset-aftWidth*height+col*height+row]=chh_result;
}
// One level of the 2-D forward wavelet transform (row pass followed by column pass).
// One decomposition level: row-direction DWT into transposed temporaries, then
// column-direction DWT producing CLL (input for the next level) and the three
// detail bands written into d_output at `offset`.
// NOTE(review): cudaMallocPitch / kernel-launch results are unchecked - consider
// adding error handling at the call site.
void Dwt2D(double *data,int data_width,int data_height,size_t pitch_data,int filter_len,double *CLL,size_t pitch_cll,double *output,int offset)
{
//---------------------- Pass 1: row-direction transform ----------------------------------------------------
// the row pass writes transposed, so the temporaries are sized with swapped axes
int height=(data_width+filter_len-1)/2;// transposed height = downsampled original width
int width=data_height;// transposed width = original height
// transposed low-/high-pass temporaries
double *lpOutput;
double *hpOutput;
size_t pitch_lpout;
size_t pitch_hpout;
cudaMallocPitch((void **)&lpOutput,&pitch_lpout,sizeof(double)*width,height);
cudaMallocPitch((void **)&hpOutput,&pitch_hpout,sizeof(double)*width,height);
dim3 threads(TILE_X,TILE_Y);
dim3 blocks_row((height+TILE_X-1)/TILE_X,(width+TILE_Y-1)/TILE_Y);// grid sized from the (transposed) output geometry
Dwt1D_Row<<<blocks_row,threads>>>(data,data_width,data_height,pitch_data,filter_len,lpOutput,pitch_lpout,hpOutput,pitch_hpout);
//---------------------- Pass 2: column-direction transform on the transposed temporaries --------------------
int aftWidth=(width+filter_len-1)/2;
dim3 blocks_col((aftWidth+TILE_X-1)/TILE_X,(height+TILE_Y-1)/TILE_Y);
Dwt1D_Col<<<blocks_col,threads>>>(lpOutput,pitch_lpout,hpOutput,pitch_hpout,width,height,filter_len,CLL,pitch_cll,output,offset);
// release the temporaries (cudaFree waits for the outstanding kernels)
cudaFree(lpOutput);
cudaFree(hpOutput);
}
// Multi-level 2-D forward discrete wavelet transform (host entry point).
// Multi-level 2-D forward DWT (host entry point).
//  originData : row-major data_width x data_height input on the host.
//  length     : filled with per-level output dims (length[2i]=height, length[2i+1]=width).
//  output     : packed coefficient buffer of total_size doubles; each level's
//               CLH/CHL/CHH bands are packed from the end backwards, and the final
//               level's CLL band is stored just before its detail bands.
// Fixed defect: the original freed cLL inside the last-iteration branch AND freed
// d_data (which aliases cLL) after the loop - a double free. cLL is now released
// exactly once, via the trailing cudaFree(d_data).
extern "C" void Dwt2D_CUDA( int level,int *length,
double *originData,int data_width,int data_height,
double *lpfilter,double *hpfilter,int filter_len,
double *output,int total_size)
{
int height=data_height;
int width=data_width;
int sum_step=0;// number of detail-band doubles already emitted
double *d_output;
cudaMalloc((void **)&d_output,sizeof(double)*total_size);
//---------------------- copy input data and filters to the GPU ------------------------------------------------
double *d_data;
size_t pitch_data;
cudaMallocPitch((void **)&d_data,&pitch_data,sizeof(double)*width,height);
cudaMemcpy2D(d_data,pitch_data,originData,sizeof(double)*width,sizeof(double)*width,height,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_lpFilter, lpfilter, filter_len*sizeof(double));
cudaMemcpyToSymbol(c_hpFilter, hpfilter, filter_len*sizeof(double));
//---------------------- J-level decomposition -----------------------------------------------------------------
for(int iter=0;iter<level;iter++)
{
int next_width=(width+filter_len-1)/2;
int next_height=(height+filter_len-1)/2;
// record this level's output dimensions
length[iter*2]=next_height;
length[iter*2+1]=next_width;
int offset=total_size-sum_step;// end position of this level's CLH/CHL/CHH bands in d_output
double *cLL;// approximation band, becomes the next level's input
size_t pitch_cll;
cudaMallocPitch((void **)&cLL,&pitch_cll,sizeof(double)*next_width,next_height);
// run one 2-D decomposition level
Dwt2D(d_data,width,height,pitch_data,filter_len,cLL,pitch_cll,d_output,offset);
width=next_width;
height=next_height;
// the approximation band is the input of the next iteration
cudaFree(d_data);
d_data=cLL;
pitch_data=pitch_cll;
// at the last level, also pack the final approximation band into the output
if(iter==level-1)
{
cudaMemcpy2D((d_output+offset-4*next_width*next_height),sizeof(double)*next_width,cLL,pitch_cll,sizeof(double)*next_width,next_height,cudaMemcpyDeviceToDevice);
// do NOT free cLL here: d_data aliases it and it is released once below
// (the original freed it in both places -> double free)
}
sum_step+=next_width*next_height*3;
}
//---------------------- copy the packed coefficients back to the host -----------------------------------------
cudaMemcpy(output,d_output,sizeof(double)*total_size,cudaMemcpyDeviceToHost);
cudaFree(d_data);
cudaFree(d_output);
}
//------------------------------------------------------------- Inverse transform -------------------------------------------------------------
// Inverse DWT, column direction. Reads CLL plus the CLH/CHL/CHH bands of the current
// level (located in the packed coefficient buffer via `offset`), upsamples by
// inserting zeros at odd positions, convolves with the inverse filters, and writes
// the approximation/detail planes transposed so the row pass can read them row-wise.
// CLL is a plain (non-pitched) column-major buffer here, matching how the forward
// pass packed its output; app/detail use pitched storage.
// Inverse column pass. Each thread reconstructs one upsampled coefficient (col)
// of one column (row) for both output planes:
//   app    = iLp * CLL + iHp * CLH   (approximation plane)
//   detail = iLp * CHL + iHp * CHH   (detail plane)
// width/height here are the CURRENT level's (height, width) - the sub-band data is
// read column-major, i.e. already transposed; nextWidth is the reconstructed size.
__global__ void iDwt1D_Col(double *CLL,double *data,int offset,int width,int height,int nextWidth,int filter_len,
double *app,size_t pitch_app,double *detail,size_t pitch_detail)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
if(col>=nextWidth||row>=height)
return;
// start offsets of the three detail bands inside the packed buffer
int clh_begin=offset;
int chl_begin=offset+width*height;
int chh_begin=offset+2*width*height;
// output rows; indexing by col transposes the result back
double* row_appData=(double*)((char*)app+col*pitch_app);
double* row_detailData=(double*)((char*)detail+col*pitch_detail);
int oriIndex=col+filter_len-2;// rightmost position in the zero-upsampled sequence touched by the filter
double cll_result=0;
double clh_result=0;
double chl_result=0;
double chh_result=0;
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double cllTmpValue=0;
double clhTmpValue=0;
double chlTmpValue=0;
double chhTmpValue=0;
// the upsampled sequence has a stored coefficient only at even in-range
// positions (index/2 in the sub-band); everything else contributes zero
if(index>=0&&index<(2*width-1)&&(index%2)==0)
{
cllTmpValue=CLL[index/2*height+row];// column-major read
clhTmpValue=data[clh_begin+index/2*height+row];
chlTmpValue=data[chl_begin+index/2*height+row];
chhTmpValue=data[chh_begin+index/2*height+row];
}
else
{
cllTmpValue=0;
clhTmpValue=0;
chlTmpValue=0;
chhTmpValue=0;
}
cll_result+=cllTmpValue*c_iLpFilter[i];
clh_result+=clhTmpValue*c_iHpFilter[i];
chl_result+=chlTmpValue*c_iLpFilter[i];
chh_result+=chhTmpValue*c_iHpFilter[i];
}
// write transposed: low+high recombination per plane
row_appData[row]=cll_result+clh_result;
row_detailData[row]=chl_result+chh_result;
}
// Inverse DWT, row direction: recombines the approximation and detail planes
// produced by the column pass into the reconstructed data of the next (larger)
// level. Grid is sized nextWidth x height; output is a plain row-major buffer.
// Inverse row pass: output = iLp * app + iHp * detail, with zero-insertion
// upsampling along the row. width is the sub-band width, nextWidth the
// reconstructed width; output is row-major (not pitched).
__global__ void iDwt1D_Row(double *app,size_t pitch_app,double *detail,size_t pitch_detail,int width,int height,
int nextWidth,int filter_len,double *output)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
if(col>=nextWidth||row>=height)
return;
// start of this row in each pitched input plane
double* row_appData=(double*)((char*)app+row*pitch_app);
double* row_detailData=(double*)((char*)detail+row*pitch_detail);
int oriIndex=col+filter_len-2;// rightmost upsampled position touched by the filter
double app_result=0;
double detail_result=0;
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double appTmpValue=0;
double detailTmpValue=0;
// only even in-range positions of the zero-upsampled sequence carry data
if(index>=0&&index<(2*width-1)&&(index%2)==0)
{
appTmpValue=row_appData[index/2];
detailTmpValue=row_detailData[index/2];
}
else
{
appTmpValue=0;
detailTmpValue=0;
}
app_result+=appTmpValue*c_iLpFilter[i];
detail_result+=detailTmpValue*c_iHpFilter[i];
}
output[row*nextWidth+col]=app_result+detail_result;
}
// Multi-level 2-D inverse discrete wavelet transform (host entry point).
// Multi-level 2-D inverse DWT (host entry point).
//  length : per-level dims as written by Dwt2D_CUDA; assumes 2*(level+1) entries,
//           with length[2*level], length[2*level+1] the final reconstructed dims
//           - TODO confirm the caller provides the extra pair.
//  data   : packed coefficient buffer of data_size doubles (CLL first, then the
//           detail bands level by level).
//  output : host buffer receiving the reconstructed data.
extern "C" void iDwt2D_CUDA
(
int level,int *length,
double *data,int data_size,
double *lpfilter,double *hpfilter,int filter_len,
double *output
)
{
int height=length[0];
int width=length[1];
int offset=height*width;// position of the first detail band in the packed buffer
double *CLL;
cudaMalloc((void **)&CLL,sizeof(double)*width*height);
//---------------------------------- copy data and filters to the GPU ---------------------------------------
double *d_data;
cudaMalloc((void **)&d_data,sizeof(double)*data_size);
cudaMemcpy(d_data,data,sizeof(double)*data_size,cudaMemcpyHostToDevice);
cudaMemcpy(CLL,d_data,sizeof(double)*width*height,cudaMemcpyDeviceToDevice);// seed CLL with the coarsest approximation band
cudaMemcpyToSymbol(c_iLpFilter, lpfilter, filter_len*sizeof(double));
cudaMemcpyToSymbol(c_iHpFilter, hpfilter, filter_len*sizeof(double));
//---------------------------------- J-level reconstruction -------------------------------------------------
dim3 threads(TILE_X,TILE_Y);
for(int iter=0;iter<level;iter++)
{
// dimensions of the current (coarse) level
int tmp_height=length[2*iter];
int tmp_width=length[2*iter + 1];
// dimensions of the next (finer) level
int next_height=length[2*iter+2];
int next_width=length[2*iter+3];
//---------------------- Pass 1: inverse column transform ---------------------------------
double *app;
double *detail;
size_t pitch_app;
size_t pitch_detail;
cudaMallocPitch((void **)&app,&pitch_app,sizeof(double)*tmp_width,next_height);
cudaMallocPitch((void **)&detail,&pitch_detail,sizeof(double)*tmp_width,next_height);
dim3 blocks_col((next_height+TILE_X-1)/TILE_X,(tmp_width+TILE_Y-1)/TILE_Y);
iDwt1D_Col<<<blocks_col,threads>>>(CLL,d_data,offset,tmp_height,tmp_width,next_height,filter_len,app,pitch_app,detail,pitch_detail);
//---------------------- Pass 2: inverse row transform ------------------------------------
// NOTE(review): this relies on cudaFree synchronizing with the kernel above
// (which still reads CLL) before the memory is reused - confirm.
cudaFree(CLL);
cudaMalloc((void **)&CLL,sizeof(double)*next_height*next_width);// reallocated at the finer size; becomes the next iteration's input
dim3 blocks_row((next_width+TILE_X-1)/TILE_X,(next_height+TILE_Y-1)/TILE_Y);
iDwt1D_Row<<<blocks_row,threads>>>(app,pitch_app,detail,pitch_detail,tmp_width,next_height,next_width,filter_len,CLL);
offset+=3*tmp_height*tmp_width;// advance past this level's three detail bands
cudaFree(app);
cudaFree(detail);
}
//----------------------------- copy the reconstructed data back ---------------------------------------------
height=length[2*level];
width=length[2*level+1];
cudaMemcpy(output,CLL,sizeof(double)*height*width,cudaMemcpyDeviceToHost);
cudaFree(CLL);
cudaFree(d_data);
}
/*
//ڻȡתú
__global__ void getTransposeData_shared(double *CLL,size_t pitch_CLL,double *data,int offset,int width,int height,
double *tr_cll,size_t pitch_cll,double *tr_clh,size_t pitch_clh,
double *tr_chl,size_t pitch_chl,double *tr_chh,size_t pitch_chh)
{
int col=blockIdx.x*blockDim.x+threadIdx.x;//width
int row=blockIdx.y*blockDim.y+threadIdx.y;//height
if(col>=width||row>=height)
return;
int clh_begin=offset;
int chl_begin=offset+width*height;
int chh_begin=offset+2*width*height;
double* row_CLL=(double*)((char*)CLL+row*pitch_CLL);
//ʼеַ
double* row_cll=(double*)((char*)tr_cll+col*pitch_cll);
double* row_clh=(double*)((char*)tr_clh+col*pitch_clh);
double* row_chl=(double*)((char*)tr_chl+col*pitch_chl);
double* row_chh=(double*)((char*)tr_chh+col*pitch_chh);
__shared__ double s_Data[TILE_Y][TILE_X*4];
s_Data[threadIdx.y][threadIdx.x]=row_CLL[col];
s_Data[threadIdx.y][threadIdx.x+TILE_X]=data[clh_begin+row*width+col];
s_Data[threadIdx.y][threadIdx.x+TILE_X*2]=data[chl_begin+row*width+col];
s_Data[threadIdx.y][threadIdx.x+TILE_X*3]=data[chh_begin+row*width+col];
__syncthreads();
row_cll[row]=s_Data[threadIdx.y][threadIdx.x];
row_clh[row]=s_Data[threadIdx.y][threadIdx.x+TILE_X];
row_chl[row]=s_Data[threadIdx.y][threadIdx.x+TILE_X*2];
row_chh[row]=s_Data[threadIdx.y][threadIdx.x+TILE_X*3];
}
//С任зϵ
__global__ void iDwt1D_Col(double *CLL,size_t pitch_cll,double *CLH,size_t pitch_clh,double *CHL,size_t pitch_chl,double *CHH,size_t pitch_chh,
int width,int height,int nextWidth,int filter_len,
double *app,size_t pitch_app,double *detail,size_t pitch_detail)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
if(col>=nextWidth||row>=height)
return;
//ݵʼеַ
double* row_CLLData=(double*)((char*)CLL+row*pitch_cll);
double* row_CLHData=(double*)((char*)CLH+row*pitch_clh);
double* row_CHLData=(double*)((char*)CHL+row*pitch_chl);
double* row_CHHData=(double*)((char*)CHH+row*pitch_chh);
//ݵʼеַcol൱תúк
double* row_appData=(double*)((char*)app+col*pitch_app);
double* row_detailData=(double*)((char*)detail+col*pitch_detail);
int oriIndex=col+filter_len-2;//ʼϲλ
double cll_result=0;
double clh_result=0;
double chl_result=0;
double chh_result=0;
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double cllTmpValue=0;
double clhTmpValue=0;
double chlTmpValue=0;
double chhTmpValue=0;
//СֽȡݲҪλòϲУȡ0ڣǷżżԭʼȡ/2λõ
//ֱȡ0
if(index>=0&&index<(2*width-1)&&(index%2)==0)
{
cllTmpValue=row_CLLData[index/2];
clhTmpValue=row_CLHData[index/2];
chlTmpValue=row_CHLData[index/2];
chhTmpValue=row_CHHData[index/2];
}
else
{
cllTmpValue=0;
clhTmpValue=0;
chlTmpValue=0;
chhTmpValue=0;
}
cll_result+=cllTmpValue*c_iLpFilter[i];
clh_result+=clhTmpValue*c_iHpFilter[i];
chl_result+=chlTmpValue*c_iLpFilter[i];
chh_result+=chhTmpValue*c_iHpFilter[i];
}
//൱תú
row_appData[row]=cll_result+clh_result;
row_detailData[row]=chl_result+chh_result;
}
//С任зϵ
__global__ void iDwt1D_Row(double *app,size_t pitch_app,double *detail,size_t pitch_detail,int width,int height,
int nextWidth,int filter_len,double *output,size_t pitch_out)
{
int col=threadIdx.x+blockIdx.x*blockDim.x;
int row=threadIdx.y+blockIdx.y*blockDim.y;
if(col>=nextWidth||row>=height)
return;
//ݺеʼλ
double* row_appData=(double*)((char*)app+row*pitch_app);
double* row_detailData=(double*)((char*)detail+row*pitch_detail);
double* row_output=(double*)((char*)output+row*pitch_out);
int oriIndex=col+filter_len-2;
double app_result=0;
double detail_result=0;
#pragma unroll
for(int i=0;i<filter_len;i++)
{
int index=oriIndex-i;
double appTmpValue=0;
double detailTmpValue=0;
if(index>=0&&index<(2*width-1)&&(index%2)==0)
{
appTmpValue=row_appData[index/2];
detailTmpValue=row_detailData[index/2];
}
else
{
appTmpValue=0;
detailTmpValue=0;
}
app_result+=appTmpValue*c_iLpFilter[i];
detail_result+=detailTmpValue*c_iHpFilter[i];
}
row_output[col]=app_result+detail_result;
}
//άС任
extern "C" void iDwt2D_CUDA
(
int level,int *length,
double *data,int data_size,
double *lpfilter,double *hpfilter,int filter_len,
double *output
)
{
int height=length[0];
int width=length[1];
int offset=height*width;
double *CLL;
size_t pitch_CLL;
cudaMallocPitch((void **)&CLL,&pitch_CLL,sizeof(double)*width,height);
//----------------------------------ݸƽ---------------------------------------------
double *d_data;
cudaMalloc((void **)&d_data,sizeof(double)*data_size);
cudaMemcpy(d_data,data,sizeof(double)*data_size,cudaMemcpyHostToDevice);
cudaMemcpy2D(CLL,pitch_CLL,d_data,sizeof(double)*width,sizeof(double)*width,height,cudaMemcpyDeviceToDevice);//ʼCLL
cudaMemcpyToSymbol(c_iLpFilter, lpfilter, filter_len*sizeof(double));
cudaMemcpyToSymbol(c_iHpFilter, hpfilter, filter_len*sizeof(double));
//----------------------------------ݸƽ------------------------------------------
//----------------------------------JСع------------------------------------------------
dim3 threads(TILE_X,TILE_Y);
for(int iter=0;iter<level;iter++)
{
//ǰĸ߿
int tmp_height=length[2*iter];
int tmp_width=length[2*iter + 1];
//һĸ߿
int next_height=length[2*iter+2];
int next_width=length[2*iter+3];
//----------------------ȡת-----------------------------------------------------
double *cll;size_t pitch_cll;
double *clh;size_t pitch_clh;
double *chl;size_t pitch_chl;
double *chh;size_t pitch_chh;
cudaMallocPitch((void **)&cll,&pitch_cll,sizeof(double)*tmp_height,tmp_width);
cudaMallocPitch((void **)&clh,&pitch_clh,sizeof(double)*tmp_height,tmp_width);
cudaMallocPitch((void **)&chl,&pitch_chl,sizeof(double)*tmp_height,tmp_width);
cudaMallocPitch((void **)&chh,&pitch_chh,sizeof(double)*tmp_height,tmp_width);
dim3 blocks_trans((tmp_width+TILE_X-1)/TILE_X,(tmp_height+TILE_Y-1)/TILE_Y);
getTransposeData_shared<<<blocks_trans,threads>>>(CLL,pitch_CLL,d_data,offset,tmp_width,tmp_height,
cll,pitch_cll,clh,pitch_clh,chl,pitch_chl,chh,pitch_chh);
//----------------------һΣȽзϵ任---------------------------------
double *app;
double *detail;
size_t pitch_app;
size_t pitch_detail;
cudaMallocPitch((void **)&app,&pitch_app,sizeof(double)*tmp_width,next_height);
cudaMallocPitch((void **)&detail,&pitch_detail,sizeof(double)*tmp_width,next_height);
dim3 blocks_col((next_height+TILE_X-1)/TILE_X,(tmp_width+TILE_Y-1)/TILE_Y);
iDwt1D_Col<<<blocks_col,threads>>>(cll,pitch_cll,clh,pitch_clh,chl,pitch_chl,chh,pitch_chh,
tmp_height,tmp_width,next_height,filter_len,app,pitch_app,detail,pitch_detail);
//ͷ
cudaFree(cll);
cudaFree(clh);
cudaFree(chl);
cudaFree(chh);
//----------------------ڶΣзϵ任-------------------------------------
double *out;size_t pitch_out;
cudaMallocPitch((void **)&out,&pitch_out,sizeof(double)*next_width,next_height);
dim3 blocks_row((next_width+TILE_X-1)/TILE_X,(next_height+TILE_Y-1)/TILE_Y);
iDwt1D_Row<<<blocks_row,threads>>>(app,pitch_app,detail,pitch_detail,tmp_width,next_height,next_width,filter_len,out,pitch_out);
offset+=3*tmp_height*tmp_width;
//ͷ
cudaFree(app);
cudaFree(detail);
//-----------------------һCLLʼ----------------------------------------------
cudaFree(CLL);
CLL=out;
pitch_CLL=pitch_out;
if(iter==level-1)
{
cudaMemcpy2D(output,sizeof(double)*next_width,CLL,pitch_CLL,sizeof(double)*next_width,next_height,cudaMemcpyDeviceToHost);
cudaFree(out);
}
}
//-----------------------------JСع--------------------------------------------------
cudaFree(CLL);
cudaFree(d_data);
}
*/
//------------------------------------------------------------- Gaussian filtering of DWT bands -------------------------------------------------------------
// Scale the first filter_len complex FFT bins of every row by the damping filter.
// Grid covers height rows (y) x width bins (x); bins at or beyond filter_len are
// left untouched.
__global__ void damp(int height,int width,int filter_len,cufftDoubleComplex *d_inp_fft,double *d_filter)
{
    int bin  = blockIdx.x * blockDim.x + threadIdx.x;
    int line = blockIdx.y * blockDim.y + threadIdx.y;
    if (line >= height || bin >= width || bin >= filter_len)
        return;
    cufftDoubleComplex *elem = &d_inp_fft[line * width + bin];
    elem->x *= d_filter[bin];
    elem->y *= d_filter[bin];
}
// Gaussian high-pass filtering of selected DWT detail bands via batched cuFFT transforms.
// For each decomposition level, takes one detail band of the packed DWT buffer,
// runs batched 1-D real FFTs along its columns, attenuates the low frequencies
// with a 1 - exp(-k^2 / (2*sigma^2)) high-pass curve, inverse-transforms, and
// writes the result back in place (normalized by 1/rows, since cuFFT transforms
// are unnormalized).
// NOTE(review): `begin` skips one band before and one after the filtered region
// each level, i.e. it selects the middle of the three detail bands - presumably
// CHL; confirm against the packing order used in Dwt2D_CUDA.
extern "C" void GaussianFilt_CUDA(double *dwt_data,int *length,int orig_height,int dwtlevel,double sigma)
{
int rows,cols,begin,margin,end;
int level=dwtlevel;
begin=length[0]*length[1];// skip the leading approximation-band region
for(int i=0;i<level;i++)
{
rows=length[2*i];
cols=length[2*i+1];
margin=rows*cols;// size of one sub-band at this level
begin+=margin;// skip the first detail band of this level
end=begin+margin;// [begin, end) is the band to filter
// build the high-pass curve on the host (k = frequency bin index)
int filter_len=cols/2+1;
double *filter=(double*)malloc(sizeof(double)*filter_len);
for(int k=0;k<filter_len;k++)
filter[k]=1-exp(-(double)(k)*(k)/(2*sigma*sigma));
double *d_filter;
cudaMalloc((void**)&d_filter, sizeof(double)*filter_len);
cudaMemcpy(d_filter, filter, sizeof(double)*filter_len, cudaMemcpyHostToDevice);
free(filter);
// gather the band, transposed to column-major so each FFT batch is one column
int size = sizeof(cufftDoubleReal)*rows*cols;
cufftDoubleReal *inp,*d_inp;
inp=(cufftDoubleReal *)malloc(size);
cudaMalloc((void**)&d_inp, size);
int tempindex=begin;
for(int k=0;k<rows;k++)
{
for(int j=0;j<cols;j++)
{
inp[ j*rows+ k]=dwt_data[tempindex];
tempindex++;
}
}
cudaMemcpy(d_inp, inp, size, cudaMemcpyHostToDevice);
// cols batched real-to-complex FFTs of length rows
cufftDoubleComplex *inp_fft;
cufftHandle plan_forward,plan_backward;
int half=rows/2+1;// Hermitian-redundant spectrum length
cudaMalloc((void**)&inp_fft, sizeof(cufftDoubleComplex)*(half)*cols);
cufftPlan1d(&plan_forward, rows, CUFFT_D2Z, cols);
cufftExecD2Z(plan_forward, d_inp,inp_fft);
cufftDestroy(plan_forward);
//damp<<<(half*cols+PITCH-1)/PITCH,PITCH>>>(half*cols,half,filter_len,inp_fft,d_filter);
// attenuate the low-frequency bins of every spectrum row
dim3 threads(TILE_X,TILE_Y);
dim3 blocks((half+TILE_X-1)/TILE_X,(cols+TILE_Y-1)/TILE_Y);
damp<<<blocks,threads>>>(cols,half,filter_len,inp_fft,d_filter);
// inverse transform and scatter back (divide by rows: cuFFT is unnormalized)
cufftPlan1d(&plan_backward, rows, CUFFT_Z2D, cols);
cufftExecZ2D(plan_backward, inp_fft, d_inp);
cufftDestroy(plan_backward);
cudaMemcpy(inp, d_inp, size, cudaMemcpyDeviceToHost);
tempindex=begin;
for(int k=0;k<rows;k++)
{
for(int j=0;j<cols;j++)
dwt_data[tempindex++]=inp[ j*rows+ k]/rows;
}
begin=end+margin;// skip the last detail band of this level
free(inp);
cudaFree(d_inp);
cudaFree(inp_fft);
cudaFree(d_filter);
}
}
|
5,904 | #include "includes.h"
// Possible weight coefficients for tracking cost evaluation :
// Gaussian discretisation
/*
* 1 4 6 4 1
* 4 16 24 16 4
* 6 24 36 24 6
* 4 16 24 16 4
* 1 4 6 4 1
*/
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Scharr operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Compute spatial derivatives using Sobel operator - Naive implementation..
// Low pass gaussian-like filtering before subsampling
// Low pass gaussian-like filtering before subsampling
/*
// Upsample a picture using the "magic" kernel
*/
// Vertical 5-tap binomial smoothing (weights 1 4 6 4 1 / 16) of a w x h image,
// with the sample rows clamped at the top and bottom borders.
__global__ void kernelSmoothY(float const * in, int w, int h, float * out)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= w || y >= h)
        return;
    // clamp the four neighbour rows into [0, h-1]
    int r0 = (y - 2 < 0) ? 0 : y - 2;
    int r1 = (y - 1 < 0) ? 0 : y - 1;
    int r3 = (y + 1 >= h) ? h - 1 : y + 1;
    int r4 = (y + 2 >= h) ? h - 1 : y + 2;
    out[y*w + x] = 0.0625f*in[r0*w + x] + 0.25f*in[r1*w + x]
                 + 0.375f*in[y*w + x]  + 0.25f*in[r3*w + x]
                 + 0.0625f*in[r4*w + x];
}
5,905 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_WIDTH 256
// Builds a 128-bin histogram of the chars in d_array_in[0..n): each block
// accumulates into a shared-memory histogram, then merges it into the global
// bins with atomics.
// NOTE(review): the first loop zeroes d_array_out from inside the kernel, but
// there is no grid-wide barrier, so another block can reach its merge atomics
// before the zeroing block has cleared a bin. The global bins should be cleared
// on the host (cudaMemset) before launch - confirm all callers do this.
// NOTE(review): d_array_in is plain char; a negative value would index
// shared_bin out of bounds - assumes 7-bit ASCII input.
__global__ void histogram(char *d_array_in, int *d_array_out, int n)
{
__shared__ int shared_bin[128];
int i, index, blocks, iterations;
blocks = (n - 1) / BLOCK_WIDTH + 1;  // grid size, recomputed from n (must match the launch)
iterations = 127 / (blocks * BLOCK_WIDTH) + 1;
// spread the zeroing of the 128 global bins across the whole grid (see race note)
for (i = 0; i < iterations; i++)
{
index = (blockIdx.x + i * blocks) * blockDim.x + threadIdx.x;
if (index < 128)
{
d_array_out[index] = 0;
}
}
iterations = 127 / BLOCK_WIDTH + 1;
// zero this block's shared-memory histogram
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
shared_bin[index] = 0;
}
__syncthreads();
}
// count this thread's input character
index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n)
{
atomicAdd(&shared_bin[d_array_in[index]], 1);
}
__syncthreads();
// merge the block-local histogram into the global bins
for (i = 0; i < iterations; i++)
{
index = i * blockDim.x + threadIdx.x;
if (index < 128)
{
atomicAdd(&d_array_out[index], shared_bin[index]);
}
__syncthreads();
}
return;
}
// Test harness: reads datasets (one text line each) from the -i file, runs the
// histogram kernel on each line, compares the bins against the -e file, and
// optionally writes the computed bins to the -o file.
// Fixes over the original:
//  - the newline scan also stops at '\0', so a final line without '\n' can no
//    longer run past the buffer;
//  - h_array_in is freed on both early-break paths (was leaked);
//  - d_array_out is cleared with cudaMemset before the launch instead of relying
//    on the kernel's racy cross-block zeroing;
//  - the output/compare loops are bounded by min(num_bins, 128) so neither
//    expectedOutput nor h_array_out is indexed out of bounds.
int main(int argc, char *argv[])
{
bool input_check = false;
bool expected_check = false;
bool output_check = false;
bool error_present = false;
bool expect_output = false;
bool output_pass;
char input_file_name[256];
char expected_file_name[256];
char output_file_name[256];
FILE *input_file = NULL;
FILE *expected_file = NULL;
FILE *output_file = NULL;
char *h_array_in = NULL;
int *h_array_out = NULL;
char *d_array_in = NULL;
int *d_array_out = NULL;
int *expectedOutput = NULL;
int i, n, num_bins, dataset_no, limit;
// ---- parse -i / -e / -o command-line options ----
for (i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-i") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
input_check = true;
strcpy(input_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-e") == 0 && argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
expected_check = true;
strcpy(expected_file_name, argv[i + 1]);
}
}
if (strcmp(argv[i], "-o") == 0)
{
expect_output = true;
if (argc > i + 1)
{
if (argv[i + 1][0] != '-')
{
output_check = true;
strcpy(output_file_name, argv[i + 1]);
}
}
}
}
// ---- validate the option set and open the files ----
if (!input_check)
{
std::cout << "Execution command syntax error: \"Input\" filename required" << std::endl;
error_present = true;
}
else
{
input_file = fopen(input_file_name, "r");
if (!input_file)
{
std::cout << "Error: File " << input_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!expected_check)
{
std::cout << "Execution command syntax error: \"Expected Output\" filename required" << std::endl;
error_present = true;
}
else
{
expected_file = fopen(expected_file_name, "r");
if (!expected_file)
{
std::cout << "Error: File " << expected_file_name << " does not exist" << std::endl;
error_present = true;
}
}
if (!output_check && expect_output)
{
std::cout << "Execution Command Syntax Warning: \"Output\" filename expected" << std::endl;
}
else if (output_check)
{
output_file = fopen(output_file_name, "w");
}
if (error_present)
{
std::cout << "Use the following command to run the program:\n\n"
"./<program> -e <expected> -i <input> -o <output>\n\n"
"Where <expected> is the expected output file, <input> is the input dataset files, and <output> is an optional path to store the results"
<< std::endl;
}
else
{
// ---- run every dataset line through the kernel ----
dataset_no = 0;
while (true)
{
h_array_in = (char *)malloc(1024 * sizeof(char));
if (fgets(h_array_in, 1024, input_file) == NULL)
{
free(h_array_in); // fixed: buffer leaked on this exit path
break;
}
// fixed: also stop at '\0' so a last line without '\n' cannot overrun
for (n = 0; h_array_in[n] != '\n' && h_array_in[n] != '\0'; n++)
{
continue;
}
h_array_in[n] = '\0';
if (fscanf(expected_file, "%d", &num_bins) == -1)
{
free(h_array_in); // fixed: buffer leaked on this exit path
break;
}
expectedOutput = (int *)malloc(num_bins * sizeof(int));
for (i = 0; i < num_bins; i++)
{
fscanf(expected_file, "%d", &expectedOutput[i]);
}
h_array_out = (int *)malloc(128 * sizeof(int));
cudaMalloc((void **)&d_array_in, n * sizeof(char));
cudaMalloc((void **)&d_array_out, 128 * sizeof(int));
// fixed: clear the global bins on the host; the kernel's own zeroing
// is not ordered across blocks (no grid-wide barrier)
cudaMemset(d_array_out, 0, 128 * sizeof(int));
cudaMemcpy(d_array_in, h_array_in, n * sizeof(char), cudaMemcpyHostToDevice);
dim3 blocks((n - 1) / BLOCK_WIDTH + 1);
dim3 threads_per_block(BLOCK_WIDTH);
histogram<<<blocks, threads_per_block>>>(d_array_in, d_array_out, n);
cudaMemcpy(h_array_out, d_array_out, 128 * sizeof(int), cudaMemcpyDeviceToHost);
// fixed: expectedOutput holds num_bins entries and h_array_out holds 128;
// never index past either buffer
limit = num_bins < 128 ? num_bins : 128;
if (output_check)
{
fprintf(output_file, "%d", 128);
for (i = 0; i < limit; i++)
{
fprintf(output_file, "\n%d", h_array_out[i]);
}
fprintf(output_file, "\n");
fflush(output_file);
}
output_pass = true;
for (i = 0; i < limit; i++)
{
if (expectedOutput[i] != h_array_out[i])
{
output_pass = false;
}
}
if (output_pass)
{
std::cout << "Dataset " << dataset_no << " PASSED" << std::endl;
}
else
{
std::cout << "Dataset " << dataset_no << " FAILED" << std::endl;
}
dataset_no++;
cudaFree(d_array_in);
cudaFree(d_array_out);
free(h_array_in);
free(h_array_out);
free(expectedOutput);
}
if (output_check)
{
std::cout << "Results stored in " << output_file_name << std::endl;
}
fclose(input_file);
fclose(expected_file);
if (output_check)
{
fclose(output_file);
}
}
return 0;
}
|
extern "C"
{
// c[idx] *= a[idx] / b[idx], one element per thread, double precision.
// NOTE(review): there is no bounds check and no length parameter, so the launch
// configuration must cover exactly the array length; adding a guard would require
// changing the C ABI these kernels export - confirm callers guarantee this.
// NOTE(review): b[idx] == 0 produces inf/NaN in c[idx]; presumably the caller
// guarantees non-zero divisors - verify.
__global__ void vmuldiv_dp(const double *a, const double *b, double *c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
c[idx] *= a[idx] / b[idx];
}
// Single-precision variant of vmuldiv_dp; same launch-coverage assumption.
__global__ void vmuldiv_sp(const float *a, const float *b, float *c)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
c[idx] *= a[idx] / b[idx];
}
}
|
5,907 | #include <stdio.h>
#include <cuda.h>
#define BLOCKSIZE 26
// Each block cooperatively builds a 26-character alphabet string (rotated by
// blockIdx.x) in shared memory; thread 0 then prints it.
// Fixed defect: the barrier between writing and reading shared memory was
// commented out, so thread 0 could print before the other threads of its block
// had stored their characters (shared-memory data race / garbage output).
__global__ void dkernel() {
    __shared__ char str[BLOCKSIZE+1];
    str[threadIdx.x] = 'A' + (threadIdx.x + blockIdx.x) % BLOCKSIZE;
    if (threadIdx.x == 0) {
        str[BLOCKSIZE] = '\0';
    }
    // all threads reach this barrier (it is outside any divergent branch)
    __syncthreads();
    if (threadIdx.x == 0) {
        printf("%d: %s\n", blockIdx.x, str);
    }
}
int main() {
    // one block per rotation; BLOCKSIZE (26) threads build each string cooperatively
    dkernel<<<10, BLOCKSIZE>>>();
    // device printf output is only flushed once the host synchronizes
    cudaDeviceSynchronize();
    return 0;
}
|
5,908 | #include "includes.h"
// For each (polynom, sample) pair, computes prob = prod_i (1 - exp(-lambda * x_i))
// in log space; any non-positive factor collapses the product to 0.
// Layout: one sample per threadIdx.x, one polynom per block, grid-stride over
// polynoms; features/probs are [featureCount x batchSize] column-per-sample.
// Fixes over the original: the log of expVal is now computed once and in single
// precision (logf) - the original evaluated double-precision log() twice per tap.
__global__ void ExpProbPolynomProbsImpl( const float* features, int batchSize, const int* splits, const float* conditions, const int* polynomOffsets, int polynomCount, float lambda, float* probs) {
    if (threadIdx.x < batchSize) {
        int polynomId = blockIdx.x;
        features += threadIdx.x;
        probs += threadIdx.x;
        while (polynomId < polynomCount) {
            int offset = polynomOffsets[polynomId];
            int nextOffset = polynomOffsets[polynomId + 1];
            const int depth = nextOffset - offset;
            float logProb = 0;
            bool zeroProb = false;
            for (int i = 0; i < depth; ++i) {
                if (zeroProb) {
                    continue;
                }
                const int f = __ldg(splits + offset + i);
                // NOTE(review): `c` is loaded but never used - possibly a missing
                // comparison against the split condition; kept to preserve behavior.
                const float c = __ldg(conditions + offset + i);
                const float x = __ldg(features + f * batchSize);
                const float val = -lambda * x;
                const float expVal = 1.0f - expf(val);
                // single log evaluation, float precision
                const float logVal = logf(expVal);
                if (isfinite(logVal)) {
                    logProb += logVal;
                } else {
                    zeroProb = true; // expVal <= 0 -> factor (and product) is 0
                }
            }
            float prob = 0.0f;
            if (!zeroProb) {
                prob = expf(logProb);
            }
            probs[polynomId * batchSize] = prob;
            polynomId += gridDim.x;
        }
    }
}
5,909 | // Name: Nishanth Baskaran
// Student ID: 19M15017
// HPSC Assignment-L5
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void init(int *bucket) {
    // Zero one histogram bin per thread.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    bucket[tid] = 0;
}
__global__ void add(int *key,int *bucket){
    // Histogram the keys: one thread per key, atomic since many keys share a bin.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    atomicAdd(&bucket[key[tid]], 1);
}
// Bucket-sort scatter: thread i walks the bucket counts until their running
// sum exceeds i; the bucket index where that happens is the sorted value for
// position i. bucket is read-only here and key[i] is private to thread i.
__global__ void sort(int *key,int *bucket){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the original called __syncthreads() inside this loop, whose trip
    // count differs per thread — a barrier in divergent control flow is
    // undefined behavior. No synchronization is needed: this kernel only
    // reads bucket[] (filled by the previous kernel on the same stream).
    for (int j = 0, k = 0; k <= i; j++) {
        key[i] = j;
        k += bucket[j];
    }
}
// Bucket sort of n small integers in [0, range) using unified memory.
// Pipeline: zero the bins -> histogram the keys -> scatter sorted values.
int main() {
int n = 50;
int range = 5;
int *key, *bucket;
// Unified memory: accessible from both host (printf loops) and kernels.
cudaMallocManaged(&key,n*sizeof(int));
cudaMallocManaged(&bucket,range*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
//since the range and n is small, using only 1 block for parallelisation
// The three kernels run on the default stream, so they execute in order;
// a single synchronize after the last launch is sufficient.
init<<<1,range>>>(bucket);
add<<<1,n>>>(key,bucket);
sort<<<1,n>>>(key,bucket);
cudaDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
cudaFree(key);
cudaFree(bucket);
}
|
5,910 | // Multiply two matrices A * B = C
// Original source: http://gpgpu-computing4.blogspot.co.id/2009/08/matrix-multiplication-1.html
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
// #include <matrixMul_kernel.cu>
// Thread block size
// #define BLOCK_SIZE 16
// #define TILE_SIZE 16
//
// #define WA 1024 // Matrix A width
// #define HA 1024 // Matrix A height
// #define WB 1024 // Matrix B width
// #define HB WA // Matrix B height
// #define WC WB // Matrix C width
// #define HC HA // Matrix C height
// CUDA Kernel
// Computes C = A * B for row-major matrices, one output element per thread.
// A is (hA x wA), B is (wA x wB), C is (hA x wB). No bounds guard: the launch
// grid must cover C exactly.
__global__ void matrixMul( float* C, float* A, float* B, int wA, int wB)
{
    // 2D global thread ID = coordinates of the output element in C.
    int tx = blockIdx.x * blockDim.x + threadIdx.x;   // column of C
    int ty = blockIdx.y * blockDim.y + threadIdx.y;   // row of C
    // Dot product of row ty of A with column tx of B.
    float value = 0;
    int i;
    for (i = 0; i < wA; ++i)
    {
        float elementA = A[ty * wA + i];
        float elementB = B[i * wB + tx];
        value += elementA * elementB;
    }
    // Fix: C has width wB, not wA. The original indexed C with wA, which is
    // only correct for the square case wA == wB and scrambles/overruns C
    // for rectangular problems.
    C[ty * wB + tx] = value;
}
// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
    // Fill the buffer with pseudo-random values in [0, 1] (uses rand(),
    // so the sequence is reproducible after srand with a fixed seed).
    for (int k = 0; k < size; ++k)
    {
        data[k] = rand() / (float)RAND_MAX;
    }
}
void init(float* data, int size, float val)
{
    // Set every element of the buffer to the constant val.
    for (int k = 0; k < size; ++k)
    {
        data[k] = val;
    }
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Driver: builds two HA x HA matrices of ones, multiplies them on the GPU,
// optionally prints all three matrices, and reports wall size/time.
// Usage: prog N BLOCK_SIZE [print]   (argv is not validated — TODO confirm
// callers always pass at least two arguments).
int main(int argc, char** argv)
{
// clock() measures CPU time of this process, not GPU time.
float t0 = clock();
int i, HA, WA, HB, WB, HC, WC;
HA = atoi(argv[1]);
// All matrices are square with the same dimension.
WA = HA; HB = HA; WB = HA; HC = HA; WC = HA;
int BLOCK_SIZE = atoi(argv[2]);
int print = (argc >= 4) ? 1 : 0;
// set seed for rand()
srand(2006);
// 1. allocate host memory for matrices A and B
// NOTE(review): malloc/cudaMalloc return values are never checked.
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// 2. initialize host memory
// randomInit(h_A, size_A);
// randomInit(h_B, size_B);
// Ones everywhere, so every element of C should equal WA.
init(h_A, size_A, 1);
init(h_B, size_B, 1);
// 8. allocate device memory
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
// 9. copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
// 4. allocate host memory for the result C
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
// 10. allocate device memory for the result
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// 5. perform the calculation
// setup execution parameters
// NOTE(review): integer division — if HA is not a multiple of BLOCK_SIZE
// the trailing rows/columns of C are never computed. Confirm callers only
// use divisible sizes.
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
// execute the kernel
matrixMul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// 11. copy result from device to host
// This blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
// 6. print out the results
// print out matrices
if (print == 1) {
printf("\n\nMatrix A\n");
for(i = 0; i < size_A; i++)
{
printf("%f ", h_A[i]);
if(((i + 1) % WA) == 0)
printf("\n");
}
printf("\n\nMatrix B\n");
for(i = 0; i < size_B; i++)
{
printf("%f ", h_B[i]);
if(((i + 1) % WB) == 0)
printf("\n");
}
printf("\n\nMatrix C (Results)\n");
for(i = 0; i < size_C; i++)
{
printf("%f ", h_C[i]);
if(((i + 1) % WC) == 0)
printf("\n");
}
printf("\n");
}
// 7. clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
t0 = ((float)(clock() - t0) / CLOCKS_PER_SEC);
printf("%d\t%d\t%f\n", WA, BLOCK_SIZE, t0);
return 0;
}
|
5,911 | //pass
//--blockDim=1024 --gridDim=128
#include <cuda.h>
//--------------------------------------------------------------------------------------
// File: ComputeEngine.h
//
// This is an AMPC++ implementation of a compute shader. It transforms a shape with a
// rotation of an angle THETA.
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//--------------------------------------------------------------------------------------
#define THETA 3.1415f/1024
// Rotates each vertex (X, Y) by angle THETA, one vertex per thread.
__global__ void run(float* data_refY, float* data_refX)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: the original overwrote data_refY first and then computed the new X
    // from the already-rotated Y, which is not a rotation. Both outputs must
    // be derived from the pre-rotation coordinates, so snapshot them first.
    // Also use the single-precision cosf/sinf on float data.
    float y0 = data_refY[idx];
    float x0 = data_refX[idx];
    data_refY[idx] = y0 * cosf(THETA) - x0 * sinf(THETA);
    data_refX[idx] = y0 * sinf(THETA) + x0 * cosf(THETA);
#ifdef MUTATION
    data_refX[idx+1] = data_refX[idx+1];
    /* BUGINJECT: ADD_ACCESS, UP */
#endif
}
|
5,912 | #include "includes.h"
#define SIZE 16
__global__ void compare(int *in_d, int* out_d)
{
    // One block per element: flag positions whose value equals 6.
    const int i = blockIdx.x;
    out_d[i] = (in_d[i] == 6) ? 1 : 0;
} |
5,913 | #include "includes.h"
// Backprop-style update of one hidden neuron's input weights (one neuron per
// thread): every weight of the neuron receives the same delta, the weights are
// clamped to [0, 1], and the bias moves by -0.5 * (sum of applied deltas).
__global__ void updateHiddenWeights(float* d_weights, float error, float lr, int keyPress, float* d_outputweights, int screenSize, int numHiddenNeurons, float* d_bias, float* firstFire){
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    // Fix: the delta does not depend on the pixel index i, so it is
    // loop-invariant. The original recomputed it — including two global
    // loads — screenSize times per thread.
    // d_outputweights stride is numHiddenNeurons; keyPress indexes that row.
    const float change = error * lr * d_outputweights[id * numHiddenNeurons + keyPress] * (firstFire[id] * 2 - 1);
    float totalChange = 0.0f;
    for (int i = 0; i < screenSize; ++i){
        // Accumulate term-by-term (not change * screenSize) to keep the
        // float rounding identical to the original implementation.
        totalChange += change;
        float w = d_weights[id * screenSize + i] + change;
        // Clamp the updated weight to [0, 1].
        d_weights[id * screenSize + i] = max(0.0f, min(1.0f, w));
    }
    float biasChange = totalChange * -0.5f;
    //printf("TotalChange: %f", biasChange);
    d_bias[id] = d_bias[id] + biasChange;
} |
5,914 | __global__ void vecadd(float *a, float *b, float* c)
{
// Element-wise vector add with ONE THREAD BLOCK PER ELEMENT: the element
// index is the block index, so this must be launched as <<<n, 1>>>.
// Get our global thread ID
int id = blockIdx.x;
// NOTE(review): despite the original comment, there is no bounds guard —
// the grid size must exactly match the array length.
c[id] = a[id] + b[id];
}
|
5,915 | #include <stdio.h>
#include <cuda.h>
#include <string.h>
//testing commit
//ensure that your code is safeguarded against segmentstion faults etc...
__global__ void cypher_thread(char * t_input, char * t_output, int length){
    // Decode a Caesar(+1) message: shift every byte down by one.
    // Guarded, so extra threads beyond the message length are harmless.
    const int i = threadIdx.x;
    if (i < length) {
        t_output[i] = t_input[i] - 1;
    }
}
// Decrypts a fixed Caesar(+1) message on the GPU (one thread per character)
// and prints the plaintext to stdout.
int main(){
//initialize test message
const char program_input[] = "Ifmmp-!J!bn!b!tuvefou!ifsf!jo!uif!Dpnqvufs!Tdjfodf!Efqu/!J!kvtu!xboufe!up!dpohsbuvmbuf!zpv!po!zpvs!ofx!qptjujpo!bt!Dibjs!pg!uif!Efqbsunfou/!!Cftu!xjtift/";
int length = strlen(program_input);
int size = length * sizeof(char);
// VLA sized at runtime; holds the decoded message (no NUL terminator needed
// because output is printed character by character below).
char program_output[length];
//declare GPU memory pointers
char * t_input;
char * t_output;
//allocate memory on GPU
cudaMalloc((void **)&t_input, size);
cudaMalloc((void **)&t_output, size);
//transfer info to GPU
cudaMemcpy(t_input, program_input, size, cudaMemcpyHostToDevice);
//kernel
// Single block; length < 1024 so this fits the per-block thread limit.
cypher_thread<<<1, length>>>(t_input, t_output, length);
//get result from GPU
// Blocking copy doubles as synchronization with the kernel launch.
cudaMemcpy(program_output, t_output, size, cudaMemcpyDeviceToHost);
//print output
for(int i = 0; i < length; i++){
printf("%c", program_output[i]);
}
//free gpu memory
cudaFree(t_input);
cudaFree(t_output);
return 0;
}
|
5,916 | #include<cuda.h>
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
// Builds the RHS (rs) of a level-set extrapolation step on a 3D grid:
// rs = sign(phi) * grad(phi) . upwinded grad(extVal), where the upwind /
// downwind one-sided difference is selected per axis by the sign of the
// (signed) phi derivative. One thread per grid cell; boundary cells fall
// back to one-sided neighbors via the id/id2 remapping below.
__global__ void extrapolKernel(
double* const rs, //RS
const double* const extVal,//Var extrapol
const double* const phiS, //Level Set F
const double* const jbn, //Jacobian
const double* const d_Phi, //Phi Der
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz,
const int extFlag
)
{
const int Offset = Nx*Ny*Nz;
int id2;
double so;
double phiDeltaX, phiDeltaY, phiDeltaZ,
d_ext_xu, d_ext_xd,
d_ext_yu, d_ext_yd,
d_ext_zu, d_ext_zd;
int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets example (id_ip) EQ (i+1,j,k)
int id = Nx*Ny*idz + Nx*idy + idx,
id_ip = Nx*Ny*idz + Nx*idy + idx + 1,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jp = Nx*Ny*idz + Nx*(idy + 1) + idx,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_kp = Nx*Ny*(idz + 1) + Nx*idy + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
//Dealing with boundaries
// On a face, the out-of-range neighbor is clamped to the cell itself and
// id2 points one cell inward, turning the stencil into a one-sided one.
id2 = id;
if(idx==0){id2 = id_ip; id_im = id;}
if(idy==0){id2 = id_jp; id_jm = id;}
if(idz==0){id2 = id_kp; id_km = id;}
if(idx==Nx-1){id2 = id_im ; id_ip = id;}
if(idy==Ny-1){id2 = id_jm ; id_jp = id;}
if(idz==Nz-1){id2 = id_km ; id_kp = id;}
// pick up the side to extrapol
// extFlag > 0: extrapolate into phi > 0; otherwise into phi <= 0
// (with flipped sign so information still flows away from the interface).
if(extFlag>0){
so = (double)(phiS[id]>0.0);
}
else{
so = -1.0*(double)(phiS[id]<=0.0);
}
phiDeltaX = so*d_Phi[id ];
phiDeltaY = so*d_Phi[id + 1*Offset];
phiDeltaZ = so*d_Phi[id + 2*Offset];
// Downwind derivatives of ext
// jbn holds the (diagonal) Jacobian metric terms at strides 0/4/8.
d_ext_xd = deltaX*jbn[id ]*(extVal[id2] - extVal[id_im]);
d_ext_yd = deltaY*jbn[id + 4*Offset]*(extVal[id2] - extVal[id_jm]);
d_ext_zd = deltaZ*jbn[id + 8*Offset]*(extVal[id2] - extVal[id_km]);
// Upwind derivatives of ext
d_ext_xu = deltaX*jbn[id ]*(extVal[id_ip] - extVal[id2]);
d_ext_yu = deltaY*jbn[id + 4*Offset]*(extVal[id_jp] - extVal[id2]);
d_ext_zu = deltaZ*jbn[id + 8*Offset]*(extVal[id_kp] - extVal[id2]);
// xMax is the sign (+1/0/-1) of the phi derivative; the 0.5*(xMax±1)
// weights then select the downwind or upwind difference per axis.
double xMax = (double)(phiDeltaX > 0.0)
- (double)(phiDeltaX < 0.0);
rs[id] = (0.5*(xMax + 1.0)*d_ext_xd
+ 0.5*abs(xMax - 1.0)*d_ext_xu)*phiDeltaX;
xMax = (double)(phiDeltaY > 0.0)
- (double)(phiDeltaY < 0.0);
rs[id] += (0.5*(xMax + 1.0)*d_ext_yd
+ 0.5*abs(xMax - 1.0)*d_ext_yu)*phiDeltaY;
xMax = (double)(phiDeltaZ > 0.0)
- (double)(phiDeltaZ < 0.0);
// NOTE(review): abs() on a double relies on the CUDA overload; fabs()
// would be the unambiguous spelling — confirm intended.
rs[id] += (0.5*(xMax + 1.0)*d_ext_zd
+ 0.5*abs(xMax - 1.0)*d_ext_zu)*phiDeltaZ;
return;
}
// Computes the three spatial derivatives of the level-set field phiS with
// central differences (factor 0.5) in the interior, degrading to one-sided
// differences (factor 1.0) on domain faces. Results are stored as three
// consecutive Nx*Ny*Nz slabs of d_Phi (x, y, z). One thread per cell.
__global__ void DevFirstOrder_LS(
double* const d_Phi,
const double* const phiS,
const double* const jbn,
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz
)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets example (id_ip) EQ (i+1,j,k)
unsigned int id = Nx*Ny*idz + Nx*idy + idx,
id_ip = Nx*Ny*idz + Nx*idy + idx + 1,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jp = Nx*Ny*idz + Nx*(idy + 1) + idx,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_kp = Nx*Ny*(idz + 1) + Nx*idy + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
double factor = 0.5;
//Dealing with boundaries
// Clamp out-of-range neighbors to the cell itself; the stencil then spans
// only one grid step, so the 0.5 central-difference factor becomes 1.0.
// NOTE(review): a cell on two faces at once still gets factor 1.0 only.
if(idx==0){id_im = id; factor = 1.0;}
if(idy==0){id_jm = id; factor = 1.0;}
if(idz==0){id_km = id; factor = 1.0;}
if(idx==Nx-1){id_ip = id; factor = 1.0;}
if(idy==Ny-1){id_jp = id; factor = 1.0;}
if(idz==Nz-1){id_kp = id; factor = 1.0;}
const unsigned int Offset = Nx*Ny*Nz;
// jbn supplies the metric terms at slab strides 0 / 4 / 8.
d_Phi[ id] = factor*deltaX*jbn[id ]
* (phiS[id_ip] - phiS[id_im]);
d_Phi[1*Offset + id] = factor*deltaY*jbn[id + 4*Offset]
* (phiS[id_jp] - phiS[id_jm]);
d_Phi[2*Offset + id] = factor*deltaZ*jbn[id + 8*Offset]
* (phiS[id_kp] - phiS[id_km]);
return;
}
__global__ void RunGK_FirstS(
    double* d,
    double* d0,
    double dt,
    double* rs,
    const int Nx, const int Ny, const int Nz
)
{
    // Stage 1 of the SSP Runge-Kutta update: d = d0 - dt * rs.
    const unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int iz = blockIdx.z*blockDim.z + threadIdx.z;
    const unsigned int cell = ix + Nx*(iy + Ny*iz);
    d[cell] = d0[cell] - dt*rs[cell];
}
__global__ void RunGK_SecondS(
    double* d,
    double* d0,
    double* d1,
    double dt,
    double* rs,
    const int Nx, const int Ny, const int Nz
)
{
    // Stage 2 of the SSP Runge-Kutta update:
    // d = 0.75 * d0 + 0.25 * (d1 - dt * rs).
    const unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int iz = blockIdx.z*blockDim.z + threadIdx.z;
    const unsigned int cell = ix + Nx*(iy + Ny*iz);
    d[cell] = 0.75*d0[cell] +0.25*( d1[cell] - dt*rs[cell]);
}
__global__ void RunGK_ThirdS(
    double* d,
    double* d0,
    double* d1,
    const double dt,
    double* rs,
    const int Nx, const int Ny, const int Nz
)
{
    // Stage 3 of the SSP Runge-Kutta update:
    // d = (d0 + 2 * (d1 - dt * rs)) / 3.
    const unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int iz = blockIdx.z*blockDim.z + threadIdx.z;
    const unsigned int cell = ix + Nx*(iy + Ny*iz);
    d[cell] = (d0[cell] + 2.0*( d1[cell] - dt*rs[cell])) / 3.0 ;
}
__global__ void copyLSGas(
    double* const value,
    const double* const copyVal,
    const double* const phiS,
    int Nx, int Ny, int Nz
)
{
    // Overwrite value with copyVal wherever the level set is positive
    // (the gas side); elsewhere value is rewritten with itself.
    const unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int iz = blockIdx.z*blockDim.z + threadIdx.z;
    const unsigned int cell = ix + Nx*(iy + Ny*iz);
    value[cell] = (phiS[cell] > 0.0) ? copyVal[cell] : value[cell];
}
__global__ void copyLSLiquid(
    double* const value,
    const double* const copyVal,
    const double* const phiS,
    int Nx, int Ny, int Nz,
    int disp
)
{
    // Overwrite slab 'disp' of value with copyVal wherever the level set is
    // negative (the liquid side); elsewhere the slot keeps its old content.
    const unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int iz = blockIdx.z*blockDim.z + threadIdx.z;
    const unsigned int cell = ix + Nx*(iy + Ny*iz);
    const unsigned int slab = (unsigned int)(Nx*Ny*Nz);
    const unsigned int slot = cell + disp*slab;
    value[slot] = (phiS[cell] < 0.0) ? copyVal[cell] : value[slot];
}
|
5,917 | #include <iostream>
__global__ void add(int a, int b, int *c){
    // Single-thread kernel: store the sum of the two scalar arguments.
    *c = a + b;
}
int main(void){
    // Add 6 + 9 on the device and copy the result back.
    // NOTE(review): printf is only pulled in transitively via <iostream> —
    // confirm <cstdio> is included on all toolchains.
    int result;
    int *dev_c;
    cudaMalloc((void**)&dev_c, sizeof(int));
    add<<<1,1>>> (6, 9,dev_c);
    // Blocking copy also synchronizes with the kernel launch.
    cudaMemcpy(&result, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("6+9=%d\n", result);
    cudaFree(dev_c);
    return 0;
}
|
5,918 | #include "includes.h"
// Optical-flow image derivatives: for each pixel, computes fourth-order
// central-difference x and y derivatives (averaged over source and target
// textures) and the temporal difference Iz = source - target. Textures are
// sampled in normalized coordinates; rows of Ix/Iy/Iz are 'stride' bytes
// apart. One thread per pixel, guarded against out-of-range pixels.
__global__ void ComputeDerivativesKernel(int width, int height, int stride, float* Ix, float* Iy, float* Iz, cudaTextureObject_t texSource, cudaTextureObject_t texTarget)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix >= width || iy >= height) return;
// Normalized texel size and pixel-center coordinates.
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0, t1;
// x derivative
// 5-point stencil (-1, 8, 0, -8, 1)/12 applied to each image.
t0 = tex2D<float>(texSource, x + 2.0f * dx, y);
t0 -= tex2D<float>(texSource, x + 1.0f * dx, y) * 8.0f;
t0 += tex2D<float>(texSource, x - 1.0f * dx, y) * 8.0f;
t0 -= tex2D<float>(texSource, x - 2.0f * dx, y);
t0 /= 12.0f;
t1 = tex2D<float>(texTarget, x + 2.0f * dx, y);
t1 -= tex2D<float>(texTarget, x + 1.0f * dx, y) * 8.0f;
t1 += tex2D<float>(texTarget, x - 1.0f * dx, y) * 8.0f;
t1 -= tex2D<float>(texTarget, x - 2.0f * dx, y);
t1 /= 12.0f;
// Average the two frames' spatial derivative.
*(((float*)((char*)Ix + stride * iy)) + ix) = (t0 + t1) * 0.5f;
// t derivative
*(((float*)((char*)Iz + stride * iy)) + ix) = tex2D<float>(texSource, x, y) - tex2D<float>(texTarget, x, y);
// y derivative
t0 = tex2D<float>(texSource, x, y + 2.0f * dy);
t0 -= tex2D<float>(texSource, x, y + 1.0f * dy) * 8.0f;
t0 += tex2D<float>(texSource, x, y - 1.0f * dy) * 8.0f;
t0 -= tex2D<float>(texSource, x, y - 2.0f * dy);
t0 /= 12.0f;
t1 = tex2D<float>(texTarget, x, y + 2.0f * dy);
t1 -= tex2D<float>(texTarget, x, y + 1.0f * dy) * 8.0f;
t1 += tex2D<float>(texTarget, x, y - 1.0f * dy) * 8.0f;
t1 -= tex2D<float>(texTarget, x, y - 2.0f * dy);
t1 /= 12.0f;
*(((float*)((char*)Iy + stride * iy)) + ix) = (t0 + t1) * 0.5f;
} |
5,919 | #include <cuda_runtime.h>
#include <stdio.h>
// debug 模式启动
// Prints the name and global-memory size of CUDA device 0.
int main(){
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp,dev);
    printf("Device %d: %s \n",dev,deviceProp.name);
    // Fix: the original used the malformed format "%2.f" (precision 0) and
    // labelled the value "Mbytes" while dividing by 1024^3 (GiB). Report
    // mebibytes with two decimals so the label and the divisor agree.
    printf("Total amount of global memory %.2f Mbytes\n",deviceProp.totalGlobalMem/(1024.0*1024.0));
    return 0;
} |
5,920 | #include "includes.h"
// Adds b[0] into a[0] 1000005 times — a deliberately long busy-loop kernel.
// NOTE(review): if launched with more than one thread, the unsynchronized
// read-modify-write of a[0] is a data race; confirm this is only ever
// launched <<<1,1>>>.
__global__ void AddIntsCUDA(int* a, int* b) {
for (int i = 0; i < 1000005; i++) {
a[0] += b[0];
}
} |
5,921 | #include <iostream>
// Intentionally empty kernel: this program only exercises launch + reset.
__global__ void axpy() {
}
int main(int argc, char* argv[]) {
    // Launch the empty kernel on one block of ten threads, then tear the
    // CUDA context down.
    axpy<<<1, 10>>>();
    cudaDeviceReset();
    return 0;
}
|
5,922 | #include <iostream>
#include "../ginkgo/GLevelOrderList.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
typedef gpu_ginkgo::LevelOrderList<5> gglol;
// Single-thread smoke test of the project-defined gpu_ginkgo::LevelOrderList
// (gglol): exercises order send/ack/cancel, trade processing and book-update
// adjustment, dumping internal state via the show* helpers after each step.
// Runs entirely on the device using device-side new/delete and printf.
__global__ void test(){
gglol *p;
p = new gglol(1024, true);
printf("p = new gglol(1024);");
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SENDING A NEW ORDER >>>");
p->sendNewOrder(20, 3.0);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SENDING A NEW ORDER >>>");
p->sendNewOrder(20, 3.0);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SENDING A NEW ORDER >>>");
p->sendNewOrder(20, 4.0);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SOME ORDER BEING ACKED >>>");
p->procPendingOrders(20, 3.1);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< CANCELING THESE ORDERS >>>");
p->preCancel(5.2);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SOME ORDER BEING ACKED >>>");
p->procPendingOrders(20, 4.1);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< SOME TRADE COMING >>>\n");
// fqty/fpnl/dq/dqs/bz are in-out accumulators filled by procTrade.
int tv = 55, fqty = 0, fpnl = 0, dq = 0, dqs = 0, bz = 0;
printf("Trade size = %d, filled qty = %d, filled pnl = %d", tv, fqty, fpnl);
p->procTrade(tv, fqty, fpnl, dq, dqs, 1024, bz);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
printf("Trade size = %d, filled qty = %d, filled pnl = %d\n\n", tv, fqty, fpnl);
printf("\n\n<<< BOOK UPDATE OCCURS >>>");
p->adjustQAgainstBU(100);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< BOOK UPDATE OCCURS >>>");
p->adjustQAgainstBU(20);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
printf("\n\n<<< CANCEL TIME COMES >>>");
p->procCanceledOrders(5.5);
p->showLevelInfo();
p->showPendingOrderInfo();
p->showAckedOrderInfo();
//
delete p;
p = NULL;
if(!p) printf("\n\n SUCCESSFUL REMOVE THE LEVEL\n\n");
}
// Launches the device-side smoke test of LevelOrderList.
int main(){
    // dev_out is unused by the kernel; it only forces thrust/CUDA context setup.
    def_dvec(float) dev_out(1, 0);
    test<<<1, 1>>>();
    // Fix: without synchronizing, the process can exit before the kernel has
    // run, losing all of its device-side printf output.
    cudaDeviceSynchronize();
    return 0;
} |
5,923 | #include "includes.h"
__global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel)
{
    // Gather: partitionLabel[i] = coarseAggregate[fineAggregateSort[i]]
    // for every index below size; extra threads do nothing.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    partitionLabel[i] = coarseAggregate[ fineAggregateSort[i] ];
} |
5,924 | #include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
// Functor for thrust::for_each over element indices: marks run boundaries by
// setting count[i] = 1 wherever tid_data[i] differs from tid_data[i+1].
struct is_equal_count
{
int *tid_data;
int *count;
is_equal_count(int *tid, int *c) : tid_data(tid), count(c) {}
__host__ __device__
void operator() (const int & i)
{
// NOTE(review): reads tid_data[i+1] — callers must not pass the last valid
// index (main below iterates i = 0..5 over a 6-element vector, so
// tid_data[6] is read out of bounds). Confirm intended range.
if (tid_data[i] != tid_data[i+1])
count[i] = 1;
}
};
// struct position
// {
// int *tid_data;
// int *pos_data;
// int len;
// __host__ __device__
// position() {}
// template <typename T>
// __host__ __device__
// void operator() (const T& t)
// {
// }
// };
// output a stream of string
// Functor that serializes one trajectory (run of equal trip ids) into a JSON
// string {"trip": id, "points": [[lon, lat], ...]} written into one of the
// caller-preallocated res_data buffers. Superseded by check_stream below
// (this version picks its output slot with a racy atomicAdd — see the
// notes in operator()).
struct make_stream
{
unsigned int *index;
int *tid_data;
double *lon_data, *lat_data;
char **res_data;
__host__ __device__
make_stream(unsigned int *len, int *tid, double *lon, double *lat, char** res)
: index(len), tid_data(tid), lon_data(lon), lat_data(lat), res_data(res) {}
// Writes the decimal digits of i into a and returns the digit count.
// NOTE(review): only correct for i > 0 single-digit handling aside — i == 0
// produces an empty string and negatives are unsupported. Confirm inputs.
__host__ __device__
int itoa(int i, char *a)
{
int len = 0;
int L = 0;
int I = i;
while (I > 0)
{
L++;
I /= 10;
}
len = L;
a[L--] = '\0';
for (; i >= 10; --L)
{
a[L] = '0' + i % 10;
i /= 10;
}
a[L] = '0' + i;
// printf("%d, %s\n", len, a);
return len;
}
// NOTE(review): despite the name, this rounds to nearest (I+1 when the
// fractional part exceeds 0.5); it is not used by operator() below.
__host__ __device__
int floor(double d)
{
int I = (int)d;
if (d - I > 0.5) return I+1;
else return I;
}
// Formats d ("integer.fraction") into a, at most ~7 significant characters
// of fraction, and returns the string length. Assumes d >= 1.
__host__ __device__
int dtoa(double d, char *a)
{
int L = 0, S;
double I = d;
while (I >= 1)
{
L++;
I /= 10;
}
S = L++;
int U;
I = d - (int)(d);
I *= 10;
U = (int) (I);
printf("%lf, %lf\n", d, d - (int)(d));
// error
while (I > 0 && L < 8)
{
a[L] = '0' + U;
L++;
I = I - (int)(I);
I *= 10;
U = (int)(I);
}
a[L] = '\0';
a[S--] = '.';
I = d;
// tackle LEFT part of '.'
for (; I >= 10; --S)
{
a[S] = '0' + ((int)I)% 10;
I /= 10;
}
a[0] = '0' + (int)I;
// printf("-------%s\n", a);
return L;
}
// Appends len_b bytes of b after the first len_a bytes of a (no NUL).
__host__ __device__
void cuda_concat(char *a, int len_a, char *b, int len_b)
{
for (int idx = 0; idx < len_b; ++idx)
{
a[len_a+idx] = b[idx];
}
}
template <typename T>
__device__
void operator() (const T& i)
{
// Fires once per trajectory boundary (or for the first element).
// NOTE(review): the output slot index[1] is read AFTER the atomicAdd with
// no ordering between threads, and __syncthreads() here is in divergent
// control flow — this functor looks like an abandoned, racy draft.
if (tid_data[i] != tid_data[i+1] || i == 0)
{
atomicAdd(index+1, 1);
char *sstring = res_data[index[1]];
printf("-----before: %d\n", index[1]);
__syncthreads();
printf("-----after: %d\n", index[1]);
// __syncthreads();
// char *sstring = new char[100];
int k = (i==0 ? i : i+1);
// char *s = new char[20];
char *s = "{\"trip\": ";
cuda_concat(sstring, 0, s, 9);
int len = 9;
// char *ss = new char[20];
char ss[20];
int _l = itoa(tid_data[k], ss);
cuda_concat(sstring, len, ss, _l);
len += _l;
// delete [] ss;
// sstring[len] = '\0';
// printf("%d, %s\n", len, sstring);
char str[20] = ", \"points\": [";
cuda_concat(sstring, len, str, 13);
len += 13;
for (; tid_data[k] == tid_data[k+1]; ++k)
{
cuda_concat(sstring, len, "[", 1);
len += 1;
// char *sss = new char[20];
char sss[20];
int l = dtoa(lon_data[k], sss);
cuda_concat(sstring, len, sss, l);
len += l;
// delete [] sss;
cuda_concat(sstring, len, ", ", 2);
len += 2;
// char *ssss = new char[20];
char ssss[20];
l = dtoa(lat_data[k], ssss);
cuda_concat(sstring, len, ssss, l);
len += l;
// delete [] ssss;
cuda_concat(sstring, len, "], ", 3);
len += 3;
// printf("============%d, %d\n", tid_data[k], tid_data[k+1]);
// printf("============%d, %s\n", i, sstring);
}
// to handle the last item
// NOTE(review): s points at a string literal here, yet dtoa writes
// through it — confirm this path was ever exercised.
cuda_concat(sstring, len, "[", 1);
len += 1;
int l = dtoa(lon_data[k], s);
cuda_concat(sstring, len, s, l);
len += l;
cuda_concat(sstring, len, ", ", 2);
len += 2;
l = dtoa(lat_data[k], s);
cuda_concat(sstring, len, s, l);
len += l;
cuda_concat(sstring, len, "], ", 3);
len += 3;
// finish the json stream
cuda_concat(sstring, len, "[0, 0]]", 8);
len += 7;
sstring[len] = '\0';
// printf("---------------- %s\n", sstring);
// delete [] s;
// atomicAdd(index+1, 1);
// __syncthreads();
}
}
};
// Functor that serializes trajectory i into JSON, like make_stream, but uses
// the precomputed run-boundary positions in position_data (filled on the host
// via copy_if) instead of a racy atomic counter. position_data[i] is the last
// index of run i; MAX caps the valid positions.
struct check_stream
{
size_t MAX;
size_t *position_data;
int *tid_data;
double *lon_data, *lat_data;
char **res_data;
__host__ __device__
check_stream(size_t *position, size_t max, int *tid, double *lon, double *lat, char** res)
: position_data(position), MAX(max), tid_data(tid), lon_data(lon), lat_data(lat), res_data(res) {}
// Writes the decimal digits of i into a and returns the digit count.
// NOTE(review): i == 0 yields an empty string; negatives unsupported.
__host__ __device__
int itoa(int i, char *a)
{
int len = 0;
int L = 0;
int I = i;
while (I > 0)
{
L++;
I /= 10;
}
len = L;
a[L--] = '\0';
for (; i >= 10; --L)
{
a[L] = '0' + i % 10;
i /= 10;
}
a[L] = '0' + i;
// printf("%d, %s\n", len, a);
return len;
}
// Formats d ("integer.fraction", at most ~7 fractional chars) into a and
// returns the string length. Assumes d >= 1.
__host__ __device__
int dtoa(double d, char *a)
{
int L = 0, S;
double I = d;
while (I >= 1)
{
L++;
I /= 10;
}
S = L++;
int U;
I = d - (int)(d);
I *= 10;
U = (int) (I);
// printf("%lf, %lf\n", d, d - (int)(d));
// error
while (I > 0 && L < 8)
{
a[L] = '0' + U;
L++;
I = I - (int)(I);
I *= 10;
U = (int)(I);
}
a[L] = '\0';
a[S--] = '.';
I = d;
// tackle LEFT part of '.'
for (; I >= 10; --S)
{
a[S] = '0' + ((int)I)% 10;
I /= 10;
}
a[0] = '0' + (int)I;
// printf("-------%s\n", a);
return L;
}
// Appends len_b bytes of b after the first len_a bytes of a (no NUL).
__host__ __device__
void cuda_concat(char *a, int len_a, char *b, int len_b)
{
for (int idx = 0; idx < len_b; ++idx)
{
a[len_a+idx] = b[idx];
}
}
__host__ __device__
void operator() (const int & i)
{
// Only indices holding a valid run boundary (or index 0) emit output.
// NOTE(review): res_data[i] must hold at least the full JSON string —
// the 200-byte buffers allocated by main are not length-checked here.
if (position_data[i] < MAX && position_data[i] > 0 || i == 0)
{
size_t k = i == 0 ? 0 : position_data[i]+1;
char * sstring = res_data[i];
// start to output a new trajectory
char *s = "{\"trip\": ";
cuda_concat(sstring, 0, s, 9);
int len = 9;
// char *ss = new char[20];
char ss[20];
int _l = itoa(tid_data[k+1], ss);
cuda_concat(sstring, len, ss, _l);
len += _l;
// delete [] ss;
// sstring[len] = '\0';
// printf("%d, %s\n", len, sstring);
char str[20] = ", \"points\": [";
cuda_concat(sstring, len, str, 13);
len += 13;
for (; k < position_data[i+1]; ++k)
{
cuda_concat(sstring, len, "[", 1);
len += 1;
// char *sss = new char[20];
char sss[20];
int l = dtoa(lon_data[k], sss);
cuda_concat(sstring, len, sss, l);
len += l;
// delete [] sss;
cuda_concat(sstring, len, ", ", 2);
len += 2;
// char *ssss = new char[20];
char ssss[20];
l = dtoa(lat_data[k], ssss);
cuda_concat(sstring, len, ssss, l);
len += l;
// delete [] ssss;
cuda_concat(sstring, len, "], ", 3);
len += 3;
// printf("============%d, %d\n", tid_data[k], tid_data[k+1]);
// printf("============%d, %s\n", i, sstring);
}
// to handle the last item
cuda_concat(sstring, len, "[", 1);
len += 1;
char s4[20];
int l = dtoa(lon_data[k], s4);
printf("============%lu, %lf\n", k, lon_data[k]);
cuda_concat(sstring, len, s4, l);
len += l;
cuda_concat(sstring, len, ", ", 2);
len += 2;
char s5[20];
l = dtoa(lat_data[k], s5);
cuda_concat(sstring, len, s5, l);
len += l;
cuda_concat(sstring, len, "], ", 3);
len += 3;
// finish the json stream
// NOTE(review): 8 bytes are appended but len only grows by 7, so the
// final '\0' overwrite lands on the ']' copied here — confirm intended.
cuda_concat(sstring, len, "[0, 0]]", 8);
len += 7;
sstring[len] = '\0';
}
}
};
// Demo driver: builds two runs of trip ids (98x3, 86x3), locates the run
// boundaries with thrust, serializes each run to JSON on the device via
// check_stream, and prints the resulting strings on the host.
int main()
{
std::vector<int> h_tid_vec(6);
h_tid_vec[0] = 98;
h_tid_vec[1] = 98;
h_tid_vec[2] = 98;
h_tid_vec[3] = 86;
h_tid_vec[4] = 86;
h_tid_vec[5] = 86;
std::vector<double> h_lon_vec(6);
h_lon_vec[0] = 40.1162;
h_lon_vec[1] = 39.9399;
h_lon_vec[2] = 39.9401;
h_lon_vec[3] = 121.123;
h_lon_vec[4] = 812.123;
h_lon_vec[5] = 82.23;
thrust::device_vector<int> tid_vec = h_tid_vec;
thrust::device_vector<double> lon_vec = h_lon_vec;
// Latitudes reuse the longitude values for this demo.
thrust::device_vector<double> lat_vec = h_lon_vec;
thrust::device_vector<char *> res_vec(2);
thrust::device_vector<int> holder_vec(6, 0);
thrust::device_vector<size_t> position_vec(6, 0);
// Pre-mark the first and last positions as boundaries.
holder_vec[0] = 1;
holder_vec[5] = 1;
// Mark interior boundaries where consecutive trip ids differ.
// NOTE(review): is_equal_count reads tid_data[i+1] up to i == 5, one past
// the 6-element vector — see the note on the functor.
is_equal_count iec(thrust::raw_pointer_cast(tid_vec.data()), thrust::raw_pointer_cast(holder_vec.data()) );
thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(6), iec);
// Compact the marked indices into position_vec.
thrust::copy_if(thrust::make_counting_iterator(0), thrust::make_counting_iterator(6), holder_vec.begin(), position_vec.begin(), thrust::placeholders::_1 == 1);
thrust::copy(position_vec.begin(), position_vec.end(), std::ostream_iterator<size_t>(std::cout, ","));
for (int i = 0; i < 2; i++)
{
// thrust::device_vector<char> dev_str(3, 0);
// res_vec[i] = thrust::raw_pointer_cast(dev_str.data());
// char *d_str;
// cudaMalloc((void**)&d_str, 3*sizeof(char));
// 200 bytes per output string.
// NOTE(review): these device buffers are never cudaFree'd (leak), and
// check_stream does not bound its writes to 200 bytes.
char *device_array_a = 0 ;
cudaMalloc((void**)&device_array_a, 200);
res_vec[i] = device_array_a;
}
check_stream cs
(
thrust::raw_pointer_cast(position_vec.data()),
5,
thrust::raw_pointer_cast(tid_vec.data()),
thrust::raw_pointer_cast(lon_vec.data()),
thrust::raw_pointer_cast(lat_vec.data()),
thrust::raw_pointer_cast(res_vec.data())
);
thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(6), cs);
// thrust::device_vector<unsigned int > index(2);
// thrust::sequence(index.begin(), index.end());
// make_stream ms
// (
// thrust::raw_pointer_cast(index.data()),
// thrust::raw_pointer_cast(tid_vec.data()),
// thrust::raw_pointer_cast(lon_vec.data()),
// thrust::raw_pointer_cast(lat_vec.data()),
// thrust::raw_pointer_cast(res_vec.data())
// );
// thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(5), ms);
for (int i = 0; i < 2; i++)
{
// thrust::host_vector<char*> h_res( &res_vec[0], &res_vec[0] + 2 );
char *h_array_a = (char *)calloc(200, 1);
cudaMemcpy(h_array_a, res_vec[i], 200, cudaMemcpyDeviceToHost);
printf("-----%s\n", h_array_a);
}
} |
5,925 | #include "prefix_sum_cuda.cuh"
// In-place exclusive prefix sum (Blelloch work-efficient scan) over a[0..N).
// Phase 1 (up-sweep) builds partial sums at power-of-two strides, then the
// last element is replaced by the identity, and phase 2 (down-sweep) pushes
// the partials back down.
// NOTE(review): assumes a single block with blockDim.x == N and N a power of
// two — confirm at the call site. The barriers sit outside the divergent if,
// so all threads reach them.
__global__ void prefix_sum_cuda(int *a, size_t N) {
int tid = threadIdx.x;
int i = 0;
// Up-sweep: thread at position (multiple of i) - 1 accumulates the
// partial sum from i/2 positions to its left.
for (i = 2; i <= N; i *= 2) {
if (((i - tid % i) == 1) && tid != 0) {
a[tid] = a[tid] + a[tid - i / 2];
}
__syncthreads();
}
// Exclusive scan: seed the root with the identity element.
if (tid == N - 1) {
a[tid] = 0;
}
// Down-sweep: swap-and-add back down the implicit tree.
for (; i > 1; i /= 2) {
if (((i - tid % i) == 1) && tid != 0) {
int temp = a[tid - i / 2];
a[tid - i / 2] = a[tid];
a[tid] = a[tid] + temp;
}
__syncthreads();
}
// a[tid] += d_in[tid];
} |
5,926 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
__global__ void axpy(float a, float *xVec, float *yVec){
    // y <- a*x + y, one element per thread.
    // No bounds guard: the grid must cover the arrays exactly.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    yVec[i] = a * xVec[i] + yVec[i];
}
// Runs y = 0.5*x + y on the GPU for N elements (N from argv[1]) and prints
// the vectors before and after.
int main(int argc, char** argv){
    // Robustness: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        printf("usage: %s N\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    float a = 0.5;
    float *x_host = (float *)malloc(N*sizeof(float));
    float *y_host = (float *)malloc(N*sizeof(float));
    int i;
    for(i=0; i<N; i++){
        x_host[i] = (float)i;
        y_host[i] = 1.0;
    }
    for(i=0; i<N; i++) printf("i: %d, x[i]:, %f, y[i]: %f \n", i, x_host[i], y_host[i]);
    float * x_device;
    float * y_device;
    int n_threads_per_block = 16;
    // Fix: the original used N / 16, which silently skips the last N % 16
    // elements and launches an invalid 0-block grid for N < 16. Round the
    // grid up, and pad the device buffers to a block multiple so the
    // unguarded kernel cannot write past the allocation.
    int n_blocks = (N + n_threads_per_block - 1) / n_threads_per_block;
    size_t padded_bytes = (size_t)n_blocks * n_threads_per_block * sizeof(float);
    cudaMalloc((void**)&x_device, padded_bytes);
    cudaMalloc((void**)&y_device, padded_bytes);
    // Zero the padding so the kernel's extra threads read defined values.
    cudaMemset(x_device, 0, padded_bytes);
    cudaMemset(y_device, 0, padded_bytes);
    // copy from host to device
    cudaMemcpy(x_device, x_host, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y_device, y_host, N*sizeof(float), cudaMemcpyHostToDevice);
    axpy <<<n_blocks, n_threads_per_block>>> (a, x_device, y_device);
    // Blocking copy also synchronizes with the kernel launch.
    cudaMemcpy(y_host, y_device, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(x_device);
    cudaFree(y_device);
    x_device = NULL;
    y_device = NULL;
    printf("After axpy (0.5*x + y): \n");
    for(i=0; i<N; i++) printf("%f \n", y_host[i]);
    free(x_host);
    free(y_host);
    x_host=NULL;
    y_host=NULL;
    return 0;
}
|
5,927 | /*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/
#include <stdio.h>
#include <time.h>
#include <cuda.h>
//#define sizeOfArray 1024*1024
// 1. Execute Everything synchronously
// 2. Execute Everything asynchronously
// 3. Execute Memcpy Synchronously and kernel launch Asynchronously
// 4. Execute Memcpy asynchronously and kernel launch synchronously
// ** Measure variability across the above by chaning data size **
// ** Utilize Multiple Streams like in the example: https://devblogs.nvidia.com/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ **
// Put global variables here
int REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, *MAX_THREADS_DIM;
size_t TOTAL_GLOBAL_MEM, TOTAL_CONST_MEM;
__global__ void arrayAddition(int *device_a, int *device_b, int *device_result, int sizeOfArray)
{
    // Guarded element-wise sum: result[i] = a[i] + b[i] for i < sizeOfArray.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= sizeOfArray) return;
    device_result[gid] = device_a[gid] + device_b[gid];
}
/*
*checkCuda: will check to see if there is an error returned by CUDA runtime
*/
inline
void checkCuda(cudaError_t errMsg, const char* errContext)
{
    // Abort with a descriptive message whenever a CUDA API call failed;
    // errContext names the call site for the diagnostic.
    if (errMsg == cudaSuccess) return;
    fprintf(stderr, "CUDA Runtime Error From %s: %s\n", errContext, cudaGetErrorString(errMsg));
    exit(EXIT_FAILURE);
}
/*
* getCUDAInfo() - originally named "getHardwareContraints in module 3
* - this function will get CUDA information pertaining to the hardware
* on which we are operating... the code can then reason on these reports to determine
* the best way to structure memory transfers between the host and device
*/
void getCUDAInfo() {
//=============================Gets number of cuda devices===========================================
int deviceCount = 0;
checkCuda(cudaGetDeviceCount(&deviceCount), "Failed deviceCount load");
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
//=============================Gets number of cuda devices===========================================
// for each device found, store this device in some type of object
// NOTE(review): the file-scope globals (REGS_PER_BLOCK, WARP_SIZE, ...)
// are overwritten on each loop iteration, so after this function they
// describe only the LAST device enumerated.
int device;
for (device = 0; device < deviceCount; device++) {
// Sets the context of the device so that we know which device we are working with if there
// are multiple
cudaSetDevice(device);
cudaDeviceProp deviceProp;
// gets the "properties" struct that stores the properties of a device
// from this property struct, we can query the limitations of this device
cudaGetDeviceProperties(&deviceProp, device);
printf("\nDevice: %d \"%s\"\n===========================================\n", device, deviceProp.name);
TOTAL_GLOBAL_MEM = deviceProp.totalGlobalMem;
REGS_PER_BLOCK = deviceProp.regsPerBlock;
WARP_SIZE = deviceProp.warpSize;
MAX_THREADS_PER_BLOCK = deviceProp.maxThreadsPerBlock;
// MAX_THREADS_DIM aliases the struct's int[3] array member.
MAX_THREADS_DIM = deviceProp.maxThreadsDim;
TOTAL_CONST_MEM = deviceProp.totalConstMem;
printf("The %s has:\n\t-%zu total bytes of global memory\n\t-%zu bytes of constant memory\n\t-%d registers per block\n\t-%d threads per warp\n\t-A maximum of %d threads per block\n\t-A maximum thread dimension of %d x %d x %d\n", deviceProp.name, TOTAL_GLOBAL_MEM,TOTAL_CONST_MEM, REGS_PER_BLOCK, WARP_SIZE, MAX_THREADS_PER_BLOCK, MAX_THREADS_DIM[0], MAX_THREADS_DIM[1], MAX_THREADS_DIM[2]);
// What I think we care about:
// 1. totalGlobalMem
// 2. regsPerBlock
// 3. warpSize (i.e. numThreadsPerBlock (is this equal to regsPerBlock??)
// 4. maxThreadsperBlock
// 5. maxThreadsDim[3]
}
}
/*
 * verifyResult: checks every stream's output array against the serially
 * computed sums of host_a and host_b. Prints one line per mismatch and
 * returns true only when all num_streams * sizeOfArray entries match.
 */
bool verifyResult(int *host_a, int *host_b, int **host_result,
int num_streams, int sizeOfArray) {
bool allMatch = true;
for(int s = 0; s < num_streams; s++) {
for(int k = 0; k < sizeOfArray; k++) {
const int expected = host_a[k] + host_b[k];
if(expected != host_result[s][k]) {
allMatch = false;
printf("Case %d Failed on Stream %d, iteration %d: %d + %d = Serial Result: %d ParallelResult: %d\n", 0,s, k, host_a[k], host_b[k], host_a[k] + host_b[k], host_result[s][k]);
}
}
}
return allMatch;
}
/*
 * testStream: runs num_streams rounds of vector addition under one of four
 * memcpy/kernel synchronization strategies, selected by sync_test:
 *   0 - synchronous memcpy, default-stream kernel
 *   1 - asynchronous memcpy, default-stream kernel
 *   2 - synchronous memcpy, per-stream kernel
 *   3 - asynchronous memcpy, per-stream kernel
 * Returns the elapsed GPU time in milliseconds measured with the start/stop
 * events supplied by the caller.
 * NOTE(review): device buffers are cudaMalloc'd on every call and never
 * freed here (their pointers are handed back through device_a/b/result), so
 * repeated calls leak device memory unless the caller frees them.
 */
float testStream(int **device_a, int **device_b, int **device_result,
int *host_a, int *host_b, int **host_result,
cudaStream_t *streams, cudaEvent_t start, cudaEvent_t stop,
int num_streams, int sizeOfArray,int sync_test,
int gridSize, int blockSize) {
float elapsedTime = 0;
// Fix: record the start event once, before the loop. The original recorded
// it inside the loop, resetting the timestamp every iteration, so the
// reported time covered little more than the final stream's work.
cudaEventRecord(start,0);
for(int i = 0; i < num_streams; i++) {
// Create new stream on each iteration
checkCuda(cudaStreamCreate(&streams[i]), "cudaStreamCreate");
// Allocate device memory for each iteration
cudaMalloc( ( void**)& device_a[i], sizeOfArray * sizeof ( **device_a ) );
cudaMalloc( ( void**)& device_b[i],sizeOfArray * sizeof ( **device_b ) );
cudaMalloc( ( void**)& device_result[i], sizeOfArray * sizeof ( **device_result ) );
switch(sync_test) {
// Synchronous memcpy and kernel launch
case 0:
checkCuda(cudaMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 1");
checkCuda(cudaMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 2");
arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int)>>>(device_a[i], device_b[i], device_result[i], sizeOfArray);
checkCuda(cudaMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost), "device to host async memcpy");
break;
// Asynchronous memcpy and synchronous kernel launch
case 1:
checkCuda(cudaMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 1");
checkCuda(cudaMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 2");
arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int)>>>(device_a[i], device_b[i], device_result[i], sizeOfArray);
checkCuda(cudaMemcpyAsync(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost, streams[i]), "device to host async memcpy");
break;
// Synchronous memcpy and asynchronous kernel launch
case 2:
checkCuda(cudaMemcpy(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 1");
checkCuda(cudaMemcpy(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice), "host to device async memcpy 2");
arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int), streams[i]>>>(device_a[i], device_b[i], device_result[i], sizeOfArray);
checkCuda(cudaMemcpy(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost), "device to host async memcpy");
break;
// Asynchronous memcpy and asynchronous (per-stream) kernel launch
case 3:
checkCuda(cudaMemcpyAsync(device_a[i], host_a,sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 1");
checkCuda(cudaMemcpyAsync(device_b[i], host_b, sizeOfArray * sizeof ( int ), cudaMemcpyHostToDevice, streams[i]), "host to device async memcpy 2");
arrayAddition <<<gridSize, blockSize, blockSize * sizeof(int), streams[i]>>>(device_a[i], device_b[i], device_result[i], sizeOfArray);
checkCuda(cudaMemcpyAsync(host_result[i], device_result[i], sizeOfArray * sizeof ( int ), cudaMemcpyDeviceToHost, streams[i]), "device to host async memcpy");
break;
default:
break;
}
}
// stop is recorded on the default stream; cudaEventSynchronize then waits
// for all preceding work before the elapsed time is read.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
return elapsedTime;
}
/* Check for safe return of all calls to the device */
/*
 * Driver: times the four memcpy/kernel synchronization strategies over
 * num_streams rounds of vector addition and verifies each result.
 * Usage: prog [blockSize sizeOfArray]
 */
int main ( int argc, char **argv )
{
// Take block size and data size as input
int blockSize = 512;
int sizeOfArray = 1024 * 1024;
if(argc == 3) {
blockSize = atoi(argv[1]);
sizeOfArray = atoi(argv[2]);
}
const int num_streams = 10;
int *host_a, *host_b, *host_result[num_streams];
int *device_a[num_streams], *device_b[num_streams], *device_result[num_streams];
getCUDAInfo();
// Ceiling division so a partial final block covers the array tail.
int gridSize = ((sizeOfArray % blockSize) == 0) ? (sizeOfArray / blockSize) : ((sizeOfArray / blockSize) + 1);
cudaStream_t streams[num_streams];
cudaEvent_t start, stop;
float elapsedTime0, elapsedTime1 = 0, elapsedTime2 = 0, elapsedTime3 = 0 ;
cudaEventCreate( &start );
cudaEventCreate( &stop );
// Allocate pinned host memory (required for truly asynchronous copies).
cudaHostAlloc((void **)&host_a, sizeOfArray*sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **)&host_b, sizeOfArray*sizeof(int), cudaHostAllocDefault);
for(int i = 0; i < num_streams; i++) {
// Fix: each stream's result buffer holds a single array; the original
// allocated num_streams*sizeOfArray ints per stream (x10 over-allocation).
cudaHostAlloc((void **)&host_result[i], sizeOfArray*sizeof(int), cudaHostAllocDefault);
}
// Initialize host inputs with small random values.
for(int index = 0; index < sizeOfArray; index++)
{
host_a[index] = rand()%10;
host_b[index] = rand()%10;
}
elapsedTime0 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 0, gridSize, blockSize);
verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray);
elapsedTime1 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 1, gridSize, blockSize);
verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray);
elapsedTime2 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 2, gridSize, blockSize);
verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray);
elapsedTime3 = testStream(device_a, device_b, device_result, host_a, host_b, host_result, streams, start, stop, num_streams, sizeOfArray, 3, gridSize, blockSize);
verifyResult(host_a, host_b, host_result, num_streams, sizeOfArray);
printf("\n Size of array : %d \n", sizeOfArray);
printf("\n Time taken for synchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime0);
printf("\n Time taken for asynchronous memcpy and synchronous kernel launch: %f ms \n", elapsedTime1);
printf("\n Time taken for synchronous memcpy and asynchronous kernel launch: %f ms \n", elapsedTime2);
printf("\n Time taken for asynchronous memcpy and asynchronous kernel launch: %f ms \n", elapsedTime3);
// Fix: host_result/device_* are host arrays of per-stream pointers; they
// must be released element by element. The original passed the host arrays
// themselves to cudaFreeHost/cudaFree, freeing nothing and leaking all
// per-stream allocations. (Only the buffers from the last testStream call
// are reachable here; earlier calls' device buffers still leak inside
// testStream itself.)
for(int i = 0; i < num_streams; i++) {
cudaStreamDestroy(streams[i]);
cudaFreeHost(host_result[i]);
cudaFree(device_a[i]);
cudaFree(device_b[i]);
cudaFree(device_result[i]);
}
cudaEventDestroy(stop);
cudaEventDestroy(start);
cudaFreeHost(host_a);
cudaFreeHost(host_b);
return 0;
}
|
5,928 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#define LY 9460730472580800 // Light-year
#define G 6.67408e-11
#define BLOCK_Z 1
#define BLOCK_Y 1
#define BLOCK_X 1024
#define GRID_Z 1
#define GRID_Y 1
#define GRID_X 1024
#define TotalPoint BLOCK_X * BLOCK_Y * BLOCK_Z * GRID_X * GRID_Y * GRID_Z
#define BlackHoles 100
#define rool 40
#define dt 10 * 365 * 24 * 3600.0f
#define half_dt dt/2
#define BodiesPerSave TotalPoint*(TotalPoint/ 1000000000.0)*rool
#define Rx_u 300 * LY
#define Ry_u 300 * LY
#define Rz_u 300 * LY
#define Rx_l -300 * LY
#define Ry_l -300 * LY
#define Rz_l -300 * LY
#define Rvx_u 1e6
#define Rvy_u 1e6
#define Rvz_u 1e6
#define Rvx_l -1e6
#define Rvy_l -1e6
#define Rvz_l -1e6
#define Rm_u 1e30
#define Rm_l 1e10
#define fix 1e3
#define flopf (19*TotalPoint + 15)*1e-9
#define flopS flopf*rool*TotalPoint
// Fills Point (positions, with w = G*m*dt pre-multiplied for the kernel)
// and Point_v (velocities) with uniform random initial conditions inside
// the R*_l..R*_u box, then promotes the first BlackHoles bodies to very
// heavy masses.
void GenerateRandomPoints(float4 *Point, float4 *Point_v) {
srand(time(NULL));
//Generate random location for points.
for (int Tv=0; Tv < TotalPoint; Tv++) {
Point[Tv].x = rand()/(float)RAND_MAX * (Rx_u - Rx_l) + Rx_l;
Point[Tv].y = rand()/(float)RAND_MAX * (Ry_u - Ry_l) + Ry_l;
Point[Tv].z = rand()/(float)RAND_MAX * (Rz_u - Rz_l) + Rz_l;
//Point_Gmdt = Point.w
// NOTE(review): the mass draw uses (Rm_u + Rm_l) where the other axes use
// the (upper - lower) + lower pattern — confirm the intended distribution.
Point[Tv].w = rand()/(float)RAND_MAX * (Rm_u + Rm_l) * G * dt;
Point_v[Tv].x = rand()/(float)RAND_MAX * (Rvx_u - Rvx_l) + Rvx_l;
Point_v[Tv].y = rand()/(float)RAND_MAX * (Rvy_u - Rvy_l) + Rvy_l;
Point_v[Tv].z = rand()/(float)RAND_MAX * (Rvz_u - Rvz_l) + Rvz_l;
}
// The first BlackHoles entries get a fixed, much larger w (1e10 * Rm_u).
for (int Tv=0; Tv < BlackHoles; Tv++) {
Point[Tv].w = 1e10 * Rm_u * G * dt;
}
}
// Appends the x, y and z coordinates of all TotalPoint bodies to
// "data.data" as one python-style nested list per call:
// [[x1, x2, ...], [y1, y2, ...], [z1, z2, ...]]
void Save(float4 *Point) {
FILE *save;
if ((save=fopen("data.data", "a+")) == NULL) {
printf("Can't save data.\n");
return; // fix: previously fell through and wrote to a NULL stream
}
//Data = [[x1, x2, ...], [y1, y2, ...], [z1, z2, ...]]
fprintf(save, "[");
//Print P_xs;
fprintf(save, "[");
for (int i=0; i < TotalPoint; i++) {
fprintf(save, "%.2f", Point[i].x);
if (i != TotalPoint-1)
fprintf(save, ", ");
}
fprintf(save, "]");
//Print P_ys;
fprintf(save, ", [");
for (int i=0; i < TotalPoint; i++) {
fprintf(save, "%.2f", Point[i].y);
if (i != TotalPoint-1)
fprintf(save, ", ");
}
fprintf(save, "]");
//Print P_zs;
fprintf(save, ", [");
for (int i=0; i < TotalPoint; i++) {
fprintf(save, "%.2f", Point[i].z);
if (i != TotalPoint-1)
fprintf(save, ", ");
}
fprintf(save, "]");
fprintf(save, "]\n"); // The end.
fclose(save);
}
// Advances all particles by one tick of the all-pairs gravity simulation.
// Reads positions from Point (w carries G*m*dt, set by GenerateRandomPoints),
// writes new positions into T and updates Point_v in place; the caller
// ping-pongs Point and T between launches.
// Tiled algorithm: each block stages BLOCK_X bodies at a time in shared
// memory and every thread accumulates its interaction with that tile.
// Launch contract (per main): 1D indexing with blockDim.x == BLOCK_X and
// gridDim.x * BLOCK_X == TotalPoint, so i never exceeds TotalPoint and the
// commented-out bounds check is redundant.
__global__ void CaculateTheNextTick(float4 *Point, float4 *Point_v, float4 *T) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // Get thread's index
//if (i < TotalPoint) {
float da_i = 0.0f;
float rd = 0.0f;
float x_i = Point[i].x;
float y_i = Point[i].y;
float z_i = Point[i].z;
float dx = 0.0f;
float dy = 0.0f;
float dz = 0.0f;
float da_ix = 0.0f;
float da_iy = 0.0f;
float da_iz = 0.0f;
// One tile of body positions staged in shared memory per loop iteration.
__shared__ float4 Temp[BLOCK_X];
#pragma unroll
for (int tile=0; tile < gridDim.x; tile++) {
Temp[threadIdx.x] = Point[tile * blockDim.x + threadIdx.x];
__syncthreads();
for (int j=0; j<BLOCK_X; j++) {
dx = Temp[j].x - x_i;//1 flo
dy = Temp[j].y - y_i;//1 flo
dz = Temp[j].z - z_i;//1 flo
// 'fix' softens the distance so a body's interaction with itself
// (dx=dy=dz=0) stays finite.
rd = rsqrtf((dx * dx) + (dy * dy) + (dz * dz) + fix);//7 flo
da_i = Temp[j].w * rd * rd * rd;//3 flo
da_ix += dx * da_i;//2 flo
da_iy += dy * da_i;//2 flo
da_iz += dz * da_i;//2 flo
}//total 19 * TotalPoint flo
// Barrier before the next tile overwrites Temp.
__syncthreads();
}
// da_i* accumulates a*dt (w folds in G*m*dt), so half_dt*da_i* is the
// (1/2)a*dt^2 position term and the velocity update below adds a*dt.
T[i].x = x_i + Point_v[i].x * dt + half_dt * da_ix;//4 flo
T[i].y = y_i + Point_v[i].y * dt + half_dt * da_iy;//4 flo
T[i].z = z_i + Point_v[i].z * dt + half_dt * da_iz;//4 flo
Point_v[i].x = Point_v[i].x + da_ix;//1 flo
Point_v[i].y = Point_v[i].y + da_iy;//1 flo
Point_v[i].z = Point_v[i].z + da_iz;//1 flo
//}//total 15 + 19*TotalPoint flo
}
// Host driver: allocates particle buffers, generates random initial
// conditions, then runs the simulation forever, saving positions every
// `rool` ticks. GPU_Point and GPU_T are ping-ponged between launches to
// avoid a device-to-device copy per step.
int main() {
//Define what we need on CPU.
float4 *Point;
float4 *Point_v;
Point = (float4 *)malloc(TotalPoint * sizeof(float4));
Point_v = (float4 *)malloc(TotalPoint * sizeof(float4));
//Define what we need on GPU.
float4 *GPU_Point;
float4 *GPU_Point_v;
float4 *GPU_T;
cudaMalloc((void**)&GPU_Point, TotalPoint * sizeof(float4));
cudaMalloc((void**)&GPU_Point_v, TotalPoint * sizeof(float4));
cudaMalloc((void**)&GPU_T, TotalPoint * sizeof(float4));
int count = 0;
// Fix: clock() returns clock_t; storing it in float lost precision as the
// tick count grew. double keeps the rate computations in floating point.
double starttime, endtime;
dim3 grid(GRID_X, GRID_Y, GRID_Z);
dim3 block(BLOCK_X, BLOCK_Y, BLOCK_Z);
// Truncate any previous output file.
FILE *save;
if ((save=fopen("data.data", "w")) == NULL) {
printf("Can't save data.\n");
} else {
// Fix: the original called fclose(save) even when fopen returned NULL.
fclose(save);
}
//Generate random point.
GenerateRandomPoints(Point, Point_v);
cudaMemcpy(GPU_Point, Point, TotalPoint * sizeof(float4), cudaMemcpyHostToDevice);
cudaMemcpy(GPU_Point_v, Point_v, TotalPoint * sizeof(float4), cudaMemcpyHostToDevice);
free(Point_v);
printf("Start calc. N=%d, dt=%f, frame per save=%d\n", TotalPoint, dt, rool);
while (1==1) {
count++;
printf("[Save %d]: Computing... ", count);
//Caculate the location of next tick.
starttime = clock();
for (int k=0; k < rool; k++) {
if (k%2) {
CaculateTheNextTick<<<grid, block>>>(GPU_T, GPU_Point_v, GPU_Point);
} else {
CaculateTheNextTick<<<grid, block>>>(GPU_Point, GPU_Point_v, GPU_T);
}
// Fix: cudaThreadSynchronize() is deprecated (and removed from newer
// toolkits); cudaDeviceSynchronize() is the supported equivalent.
cudaDeviceSynchronize();
}
endtime = clock();
// NOTE(review): with rool even, the last (odd-k) launch wrote the newest
// positions into GPU_Point, so copying GPU_T here saves positions one tick
// old — confirm which buffer is intended before changing it.
cudaMemcpy(Point, GPU_T, TotalPoint * sizeof(float4), cudaMemcpyDeviceToHost);
printf("Done. %.2lf fps, %.3lf Sps, %.2lf GBps, %.2lf GFLOPS",
rool / (endtime-starttime)*CLOCKS_PER_SEC,
1 / (endtime-starttime)*CLOCKS_PER_SEC,
BodiesPerSave / (endtime-starttime)*CLOCKS_PER_SEC,
flopS / (endtime-starttime)*CLOCKS_PER_SEC);
printf(" Saving... ");
Save(Point);
printf("Done. \n");
}
//General end of C programs (unreachable: the loop above never exits).
return 0;
}
|
5,929 | #include "includes.h"
// Casts one N-element slab of the bool array y (slab idxb-1) into one slab
// of the float array x (slab idxf), element by element.
// Grid-stride loop: valid for any 1D launch configuration.
__global__ void Float(float * x, bool* y, size_t idxf, size_t idxb, size_t N)
{
const int step = blockDim.x * gridDim.x;
for (int k = blockIdx.x * blockDim.x + threadIdx.x; k < N; k += step) {
x[(idxf)*N + k] = y[(idxb-1)*N + k] ? 1.0f : 0.0f;
}
}
5,930 | #include "includes.h"
__global__ void profileLevelZero_kernel() {} |
5,931 | #include <iostream>
#include <math.h>
//__global__声明该函数为需要在GPU上计算的核函数
// Element-wise vector addition: y[i] = x[i] + y[i] for n elements.
// Generalized from a single-thread serial loop to a grid-stride loop: with
// the original <<<1,1>>> launch one thread still walks the whole range in
// the same order, but the kernel is now also correct (no duplicated work)
// under any other launch configuration.
__global__ void add(int n, float *x, float *y)
{
int stride = blockDim.x * gridDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
y[i] = x[i] + y[i];
}
// Unified-memory demo: fills two 1M-element vectors, adds them on the GPU
// with a single thread, and reports the largest deviation from the expected
// value 3.0f.
int main()
{
const int N = 1<<20;
float *x = NULL, *y = NULL;
// Allocate unified memory, accessible from both host and device.
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// Initialize the inputs on the host.
for (int i = 0; i < N; ++i)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch the kernel on the GPU with a single block of a single thread.
add<<<1,1>>>(N, x, y);
// Wait for the GPU to finish before touching the results on the host.
cudaDeviceSynchronize();
// Every element of y should now be 3.0f; report the largest deviation.
float maxError = 0.0f;
for (int i = 0; i < N; ++i)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error:" << maxError << std::endl;
// Release the unified-memory buffers.
cudaFree(x);
cudaFree(y);
return 0;
}
5,932 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// Binary functor for thrust::transform computing a*x + y (host and device).
struct saxpy_functor {
const float m_a;
saxpy_functor(float scale) : m_a(scale) {}
__host__ __device__
float operator()(const float& x, const float& y) const {
return y + m_a * x;
}
};
// Unary functor for thrust::transform that shifts a value down by a fixed
// offset (host and device).
struct offset_functor {
const float m_offset;
offset_functor(float shift) : m_offset(shift) {}
__host__ __device__
float operator()(const float& v) const {
return v - m_offset;
}
};
// Thrust sanity check: Y = 10*X + Y gives 100 everywhere, subtracting the
// expected value 10*10 should yield all zeros, so the final reduction
// prints the total deviation (0 when everything worked).
int main(int argc, char* argv[]) {
thrust::device_vector<int> Y(10000, 0);
thrust::device_vector<int> X(10000, 10);
// NOTE(review): the vectors hold int but both functors take/return float,
// so every element is converted on the way in and truncated on the way
// out — exact here, but confirm that is intended.
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(10));
thrust::transform(Y.begin(), Y.end(), Y.begin(), offset_functor(10*10));
float result = thrust::reduce(Y.begin(), Y.end());
std::cout << "Sum of deviations from expected result " << result << std::endl;
}
|
5,933 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define Filas 5
#define Columnas 7
#define NbloquesX 3
#define NbloquesY 3
#define NhebrasX 3
#define NhebrasY 2
// Transposes the Filas x Columnas matrix dev_a into dev_b (Columnas x Filas).
// One thread per source element; threads outside the matrix exit early.
__global__ void
trasponer(int *dev_a, int *dev_b)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x; // x maps to columns
const int row = blockDim.y * blockIdx.y + threadIdx.y; // y maps to rows
if (col >= Columnas || row >= Filas) {
return;
}
dev_b[col * Filas + row] = dev_a[row * Columnas + col];
}
// Fills a Filas x Columnas matrix with sequential values, transposes it on
// the GPU and prints both the source and the transposed matrix.
int
main(int argc, char** argv)
{
int a[Filas][Columnas], b[Columnas][Filas];
int *dev_a, *dev_b;
dim3 nbloques(NbloquesX,NbloquesY);
dim3 nhebras(NhebrasX,NhebrasY);
const int nbytes = Filas * Columnas * sizeof(int);
cudaMalloc((void**) &dev_a, nbytes);
cudaMalloc((void**) &dev_b, nbytes);
// Fill the source matrix with 0..Filas*Columnas-1 in row-major order.
int next = 0;
for (int f = 0; f < Filas; f++)
for (int c = 0; c < Columnas; c++)
a[f][c] = next++;
cudaMemcpy(dev_a, a, nbytes, cudaMemcpyHostToDevice);
trasponer<<<nbloques, nhebras>>>(dev_a, dev_b);
// cudaMemcpy is blocking, so no explicit synchronize is needed here.
cudaMemcpy(b, dev_b, nbytes, cudaMemcpyDeviceToHost);
printf("\nMatriz Origen\n");
for (int f = 0; f < Filas; f++) {
for (int c = 0; c < Columnas; c++)
printf("%d\t", a[f][c]);
printf("\n");
}
printf("\nMatriz Traspuesta\n");
for (int f = 0; f < Columnas; f++) {
for (int c = 0; c < Filas; c++)
printf("%d\t", b[f][c]);
printf("\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
return 0;
}
|
5,934 | #include<stdio.h>
#include<stdlib.h>
#define RADIUS 3
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
// 1D box stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS], staged through
// shared memory (body elements plus a RADIUS-wide halo on each side).
// Fix: the original loaded the halo unconditionally, reading
// in[gindex - RADIUS] below index 0 in the first block and past the end of
// the array in the last block. Out-of-range halo cells now contribute 0.
__global__ void stencil_1d(int *in, int *out) {
__shared__ int temp[THREADS_PER_BLOCK + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Each thread loads its own element; the first RADIUS threads also load
// the left and right halos, guarded against the array boundaries.
temp[lindex] = in[gindex];
if(threadIdx.x < RADIUS){
temp[lindex - RADIUS] = (gindex - RADIUS >= 0) ? in[gindex - RADIUS] : 0;
temp[lindex + THREADS_PER_BLOCK] = (gindex + THREADS_PER_BLOCK < N) ? in[gindex + THREADS_PER_BLOCK] : 0;
}
// All shared-memory loads must complete before any thread reads them.
__syncthreads();
int result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++){
result += temp[lindex + offset];
}
out[gindex] = result;
}
void random_ints(int* a);
// Driver: allocates N-element buffers, runs the stencil on the GPU and
// prints the first 512 results.
// NOTE(review): random_ints initializes only the first 512 entries, but the
// kernel reads all N — most of the input is uninitialized; confirm whether
// that is intended for this exercise.
int main(void){
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i;
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
random_ints(a);
random_ints(b);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
stencil_1d<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(i = 0; i < 512; i++){
printf("%d ",c[i]);
if(i%5 == 0)
printf("\n");
}
free(a);
free(b);
free(c);
// Fix: the original freed d_a three times and leaked d_b and d_c.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
// Fills the first 512 entries of a with pseudo-random values.
// NOTE(review): callers pass buffers of N (2048*2048) ints but only the
// first 512 are initialized here — confirm whether the constant 512 should
// instead be the buffer length.
void random_ints(int* a)
{
int i;
for ( i = 0; i < 512; ++i)
a[i] = rand();
}
|
5,935 | #include<bits/stdc++.h>
using namespace std;
const double pi = 3.14159265358979323846264;
const double L = 550;
const double Diff = 1.;
const int MAX_BLOCK_WIDTH = 32;
// In this method, we use squre cells of threads, but we need to specify the size of the square.
/*
| coordinate system:
-|---------------y
| x = i * d_x
| y = i * d_x
|
x
*/
// Dirichlet boundary data: three cold edges plus a sinusoidal top edge.
inline double left(double y) { return 0.0; }
inline double right(double y) { return 0.0; }
inline double bottom(double x) { return 0.0; }
inline double top(double x) { return sinh(pi) * sin(x * pi / L); }
// Closed-form steady-state solution used as the error reference.
inline double analytical(double x, double y) {
return sin(x * pi / L) * sinh(y * pi / L);
}
// One explicit diffusion step on the (N+1)x(N+1) grid, multi-block version.
// Each block owns a cell_size x cell_size tile of which only the interior
// (cell_size-2)^2 points are updated; the one-point overlap between tiles
// supplies the 8-neighbour stencil. Launch contract (per DiffEqnSolver):
// n_cell^2 blocks of cell_size^2 threads. Reads old, writes cur.
__global__ void oneIteration(int N, int cell_size, double *cur, double *old, double delta_x,double delta_t){
// Run one step of iteration with multiple blocks of threads
int b_size = blockDim.x, b_id = blockIdx.x, t_id = threadIdx.x, in_cell = cell_size - 2;
int n_cell = (N + in_cell -2)/in_cell;
assert(N > 1);
assert(cell_size <= MAX_BLOCK_WIDTH);
assert(b_size == cell_size*cell_size); // Here we only use square cells
__shared__ double tmp_arr[MAX_BLOCK_WIDTH][MAX_BLOCK_WIDTH]; // Define a buffer in the shared memory of one block of threads
// 8-neighbour stencil offsets (N, S, E, W and the four diagonals).
int dX[8] = {0, 0, 1, -1, 1, -1, 1, -1};
int dY[8] = {1, -1, 0, 0, 1, 1, -1, -1};
// Compute indices in the cell and in the original matrix
int local_i = t_id/cell_size, local_j = t_id%cell_size;
int global_i = (b_id/n_cell)*in_cell + local_i, global_j = (b_id%n_cell)*in_cell + local_j;
// Copy the old data into the buffer array, then synchronize threads
if(global_i>=0 && global_i<=N && global_j>=0 && global_j<=N){
tmp_arr[local_i][local_j] = old[global_i*(N+1) + global_j];
// Maintain the Dirichlet Type Boundary Conditions.
if(!global_i || !global_j || global_i==N || global_j==N) cur[global_i*(N+1)+global_j] = tmp_arr[local_i][local_j];
}
__syncthreads();
// Compute the updated value and store it into *cur
// Only interior tile points with valid grid coordinates are updated; the
// tile's one-cell border exists purely to feed the stencil.
if(local_i && local_i<cell_size-1 && local_j && local_j<cell_size-1 && global_i && global_i<N && global_j && global_j<N){
double nn_diff = 0.;
for(int k=0;k<8;++k) nn_diff += tmp_arr[local_i+dX[k]][local_j+dY[k]];
nn_diff -= 8.*tmp_arr[local_i][local_j];
cur[global_i*(N+1) + global_j] = tmp_arr[local_i][local_j] + Diff*delta_t*nn_diff/3./pow(delta_x, 2.);
}
return;
}
// N_step explicit diffusion steps performed entirely inside a single block
// (launch contract per DiffEqnSolver: <<<1, nx_thread*nx_thread>>>).
// Each thread owns a t_width x t_width patch; because all threads live in
// one block, __syncthreads() after each phase makes the compute-into-tmp /
// copy-back-into-cur double-buffering race-free. Boundary rows/cols are
// never written, preserving the Dirichlet conditions in cur.
__global__ void iterationWithOneBlock(int N,int N_step, int nx_thread, double *cur, double *tmp, double delta_x, double delta_t){
// Run multiple steps of iterations with one block of threads
int t_id = threadIdx.x, b_size = blockDim.x, t_width = (N-2+nx_thread)/nx_thread;
assert(b_size == nx_thread*nx_thread);
// Define neighbor vectors:
int dX[8] = {0, 0, 1, -1, 1, -1, 1, -1};
int dY[8] = {1, -1, 0, 0, 1, 1, -1, -1};
int global_i = (t_id/nx_thread)*t_width, globel_j = (t_id%nx_thread)*t_width;
for(int step=0;step<N_step;++step){
// Phase 1: read cur, write updated interior values into tmp.
for(int i=global_i;i<global_i+t_width;++i) for(int j=globel_j;j<globel_j+t_width;++j){
if(i && i<N && j && j<N){
double nn_sum = 0., coeff = Diff*delta_t/3./pow(delta_x, 2.);
for(int k=0;k<8;++k) nn_sum += cur[(i+dX[k])*(N+1) + j+dY[k]];
tmp[i*(N+1) + j] = (1.-8.*coeff)*cur[i*(N+1) + j] + coeff*nn_sum;
}
}
__syncthreads();
// Phase 2: copy tmp back into cur before the next step reads it.
for(int i=global_i;i<global_i+t_width;++i) for(int j=globel_j;j<globel_j+t_width;++j){
if(i && i<N && j && j<N) cur[i*(N+1) + j] = tmp[i*(N+1) + j];
}
__syncthreads();
}
return;
}
// Accumulates the squared difference between the analytical (ana) and the
// computed (cur) solutions over all (N+1)^2 grid points into *e_sum.
// Fix: the original used a plain read-modify-write on *e_sum from every
// thread — a data race that silently dropped most contributions. atomicAdd
// makes the accumulation correct. (atomicAdd on double requires SM60+.)
__global__ void cudaGetError(int N, double *ana, double *cur, double *e_sum){
// Parallelly compute the error
int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < (N+1)*(N+1)){
double diff = ana[index] - cur[index];
atomicAdd(e_sum, diff*diff);
}
return;
}
// Explicit finite-difference diffusion solver on an (n_grid+1)^2 grid with
// Dirichlet boundaries, offering a multi-block path (oneIteration) and a
// single-block path (iterationWithOneBlock), plus CPU and GPU error norms.
class DiffEqnSolver{
// n_grid: grid intervals per side; array_size: (n_grid+1)^2 points.
// in_cell/cell_size/n_cell: multi-block tiling parameters (setUpGrid).
int n_grid, array_size, in_cell, cell_size, n_cell;
// t_width/nx_thread: single-block partition parameters (setUpBlock).
int t_width, nx_thread;
// val/ana: host solution and analytical reference (row pointers into one
// contiguous allocation); cur/old/d_ana/err_sum: device buffers.
double d_x, **val, **ana, *cur, *old, *d_ana, *err_sum;
public:
// Allocates host and device storage, applies the boundary conditions and
// tabulates the analytical reference solution.
DiffEqnSolver(int N):n_grid(N){
d_x = L/n_grid;
array_size = (n_grid+1)*(n_grid+1);
// Row-pointer tables over single contiguous blocks so val[0]/ana[0] can
// be memcpy'd to the device in one shot.
val = new double* [n_grid + 1];
val[0] = new double [array_size];
for(int i=1;i<=n_grid;++i) val[i] = val[i-1] + n_grid + 1;
ana = new double* [n_grid + 1];
ana[0] = new double [array_size];
for(int i=1;i<=n_grid;++i) ana[i] = ana[i-1] + n_grid + 1;
cudaMalloc((void **)&cur, array_size*sizeof(double));
cudaMalloc((void **)&old, array_size*sizeof(double));
cudaMalloc((void **)&d_ana, array_size*sizeof(double));
cudaMalloc((void **)&err_sum, sizeof(double));
// Setting the boundary conditions
for(int i=0;i<=n_grid;++i){
val[0][i] = left(i*d_x);
val[n_grid][i] = right(i*d_x);
val[i][0] = bottom(i*d_x);
val[i][n_grid] = top(i*d_x);
}
// Get the analytical solution
for(int i=0;i<=n_grid;++i) for(int j=0;j<=n_grid;++j) ana[i][j] = analytical(i*d_x, j*d_x);
}
// Sets every interior point to init_val (boundaries stay untouched).
void init(double init_val){
for(int i=1;i<n_grid;++i) for(int j=1;j<n_grid;++j) val[i][j] = init_val;
}
// Compute errors (L2 norm)
double getError(){
double sum = 0.;
for(int i=0;i<=n_grid;++i) for(int j=0;j<=n_grid;++j)
sum += pow(val[i][j] - ana[i][j],2.);
return sqrt(sum);
}
// GPU variant of getError: uploads both fields and accumulates the squared
// error with the cudaGetError kernel.
double getErrorUsingCuda(){
double init_sum = 0.;
cudaMemcpy(d_ana, ana[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cur, val[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(err_sum, &init_sum, sizeof(double), cudaMemcpyHostToDevice);
int b_sz = min(MAX_BLOCK_WIDTH * MAX_BLOCK_WIDTH, array_size);
int n_blk = (array_size + b_sz - 1)/b_sz;
cudaGetError<<<n_blk, b_sz>>>(n_grid, d_ana, cur, err_sum);
// The blocking memcpy below also waits for the kernel to finish.
cudaMemcpy(&init_sum, err_sum, sizeof(double), cudaMemcpyDeviceToHost);
return sqrt(init_sum);
}
// Get grid size and block size
void setUpGrid(int c_size){
cell_size = c_size;
assert(cell_size > 2 && cell_size <= MAX_BLOCK_WIDTH);
in_cell = cell_size - 2;
n_cell = (n_grid + in_cell - 2)/in_cell;
}
// One step of iteration with multiple blocks
// NOTE(review): every step does a full host->device and device->host copy,
// which dominates runtime — presumably intentional for the exercise.
void oneStep(double d_t){
cudaMemcpy(old, val[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
oneIteration<<<n_cell*n_cell, cell_size*cell_size>>>(n_grid, cell_size, cur, old, d_x, d_t);
cudaMemcpy(val[0], cur, array_size*sizeof(double), cudaMemcpyDeviceToHost);
}
// Run multiple iterations with multiple blocks
double runIterations(int N_step, double d_t){
for(int t=0;t<N_step;++t) oneStep(d_t);
return getError();
}
// Get block size if we use only one block of threads
void setUpBlock(int nx_t){
nx_thread = nx_t;
assert(nx_thread > 0 && nx_thread <= MAX_BLOCK_WIDTH);
t_width = (n_grid-2+nx_thread)/nx_thread;
}
// Run multiple iterations with only one block of threads
// (old serves as the device-side scratch buffer here).
double runWithOneBlock(int N_step, double d_t){
cudaMemcpy(cur, val[0], array_size*sizeof(double), cudaMemcpyHostToDevice);
iterationWithOneBlock<<<1, nx_thread*nx_thread>>>(n_grid, N_step, nx_thread, cur, old, d_x, d_t);
cudaMemcpy(val[0], cur, array_size*sizeof(double), cudaMemcpyDeviceToHost);
return getError();
}
// Writes "x y value" triples for the whole grid to the named file.
void fileOutPut(string filename){
FILE *fp = fopen(filename.c_str(), "w");
if (fp == NULL) {
fprintf(stderr, "Can't open output file %s!\n", filename.c_str());
exit(1);
}
for(int i=0;i<=n_grid;++i) for(int j=0;j<=n_grid;++j){
fprintf(fp, "%lf %lf %lf\n", i*d_x, j*d_x, val[i][j]);
}
fclose(fp);
}
~DiffEqnSolver(){
delete [] val[0];
delete [] val;
delete [] ana[0];
delete [] ana;
cudaFree(cur);
cudaFree(old);
cudaFree(d_ana);
cudaFree(err_sum);
}
};
// Driver: builds the solver on an L x L grid, then runs n_batch batches of
// n_step explicit diffusion steps, printing the L2 error after each batch.
// Optional argv[1] overrides the block (tile) width.
int main(int argc, char *argv[]){
int block_width = 16;
if (argc > 1) block_width = stoi(argv[1]);
DiffEqnSolver solver((int)L);
solver.init(1.);
const int n_batch = 21, n_step = 1000;
const double dt = 0.5;
cout << setprecision(3);
cout << "Start running iterations:" << endl;
clock_t start_time = clock();
solver.setUpGrid(block_width);
for (int batch = 1; batch <= n_batch; ++batch) {
if (false) { // disabled snapshot output
string filename = "data" + to_string(batch/4);
solver.fileOutPut(filename);
}
cout << "Iteration: " << batch << "\t error:" << solver.runIterations(n_step, dt) << endl;
}
//solver.setUpBlock(block_width);
//for(int i=1;i<=n_batch;++i) cout<<"Iteration: "<<i<<"\t error:"<<solver.runWithOneBlock(n_step, dt)<<endl;
clock_t end_time = clock();
cout << "End running iterations!" << endl << endl;
cout << "Time spent during iterations: " << double(end_time-start_time)/CLOCKS_PER_SEC << "s\n\n\n";
cout << "================================================================================" << endl;
return 0;
}
|
5,936 | #include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_W 16
#define BLOCK_H 16
//--------------------------------------------------------------------------------------------------------------------
// 3x3 median filter: each interior pixel of the w x h image `in` is replaced
// in `out` by the median of its 3x3 neighbourhood. Border pixels (x/y == 0
// or on the last row/column) are skipped and left unwritten in `out` —
// NOTE(review): confirm the caller initializes the border of the output.
// Launch: 2D grid of BLOCK_W x BLOCK_H blocks, one thread per pixel.
__global__ void median_filter(const unsigned char *in, unsigned char *out, const unsigned int w, const unsigned int h) {
// Per-thread scratch row: each thread only ever touches surround[tid], so
// there is no cross-thread sharing and no barrier is needed before use.
__shared__ unsigned short surround[BLOCK_W*BLOCK_H][9];
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if( (x >= (w - 1)) || (y >= h - 1) || (x == 0) || (y == 0)) return;
// Fill shared memory with the 3x3 neighbourhood of (x, y).
int iter = 0;
for (int r = x - 1; r <= x + 1; r++)
{
for (int c = y - 1; c <= y + 1; c++)
{
surround[tid][iter] = in[c * w + r];
iter++;
}
}
// Partial selection sort: placing only the 5 smallest of the 9 values is
// enough to leave the median at index 4. (The original comment called
// this "Bubble Short"; it is selection sort, deliberately truncated.)
for (int i = 0; i < 5; i++)
{
// Find the position of the minimum element
int minval = i;
for (int l = i+1; l<9; l++) if (surround[tid][l] < surround[tid][minval]) minval = l;
// Put found minimum element in its place
unsigned short temp = surround[tid][i];
surround[tid][i] = surround[tid][minval];
surround[tid][minval]=temp;
}
// Pick the middle one
out[(y*w)+x] = surround[tid][4];
__syncthreads();
}
//--------------------------------------------------------------------------------------------------------------------
// Synthetic image dimensions for the self-contained test harness.
const unsigned int imgw = 100;
const unsigned int imgh = 100;
// Produces a synthetic single-channel imgw x imgh "image" whose pixel i has
// value i % 9, reporting its dimensions through the out-parameters.
// The caller owns (and must free) the malloc'd buffer.
void loadImg(unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *ch){
*w = imgw;
*h = imgh;
*ch = 1;
unsigned char *buf = (unsigned char *)malloc(imgw*imgh*sizeof(unsigned char));
for (int p = 0; p < imgw*imgh; ++p) buf[p] = p % 9;
*data = buf;
}
//--------------------------------------------------------------------------------------------------------------------
// Driver: generates a synthetic grayscale image, runs the 3x3 median filter
// on the GPU, and prints the top-left BLOCK_W x BLOCK_H corner of the image
// before and after filtering.
int main()
{
unsigned char *data = NULL, *d_idata = NULL, *d_odata = NULL;
unsigned int w, h, channels;
loadImg(&data, &w, &h, &channels);
printf("Loaded input file with w:%d h:%d channels:%d \n",w, h, channels);
printf("input:\n");
for (int row = 0; row < BLOCK_W; row++)
{
for (int col = 0; col < BLOCK_H; col++) printf("%d ", data[row*w+col]);
printf("\n");
}
const unsigned int numElements = w*h*channels;
const size_t datasize = numElements * sizeof(unsigned char);
cudaMalloc(&d_idata, datasize);
cudaMalloc(&d_odata, datasize);
printf("Allocate Devicememory for data\n");
cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice);
printf("Copy input data from the host memory to the CUDA device\n");
// One thread per pixel; ceiling division covers a partial last block.
dim3 threadsPerBlock(BLOCK_W, BLOCK_H);
dim3 blocksPerGrid((w+threadsPerBlock.x-1)/threadsPerBlock.x, (h+threadsPerBlock.y-1)/threadsPerBlock.y);
printf("CUDA kernel launch with [%d %d] blocks of [%d %d] threads\n", blocksPerGrid.x, blocksPerGrid.y,
threadsPerBlock.x, threadsPerBlock.y);
median_filter<<<blocksPerGrid, threadsPerBlock>>>(d_idata, d_odata, w, h);
// Blocking memcpy also synchronizes with the kernel above.
cudaMemcpy(data, d_odata, datasize, cudaMemcpyDeviceToHost);
printf("Copy output data from the CUDA device to the host memory\n");
printf("output:\n");
for (int row = 0; row < BLOCK_W; row++)
{
for (int col = 0; col < BLOCK_H; col++) printf("%d ", data[row*w+col]);
printf("\n");
}
free(data);
cudaFree(d_idata);
cudaFree(d_odata);
printf("Free device and host memory\n");
}
|
5,937 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cufft.h>
#include <iostream>
// #include <complex>
#include <cuda_runtime.h>
#define imin(a,b) (a<b?a:b)
/*--------- function called from main fortran programn ---------------*/
// ------------- Onli 1D, 2D or 3D cuFFT transform !!!!!!!!!!!!!!!!!!!
// Forward complex-to-complex cuFFT of the NX x NY x NZ array wtilde,
// in place from the caller's point of view (host -> device -> FFT -> host).
// Xcase selects dimensionality: 1 -> 1D over NX, 2 -> 2D over NY x NX,
// 3 -> 3D over NZ x NY x NX (the extents are passed to cufftPlan*d in
// reversed order — presumably to match Fortran column-major storage; the
// trailing underscore matches the Fortran calling convention).
extern "C" void kernel_imestfft_t_(cuDoubleComplex *wtilde,int *NX,int *NY,int *NZ, int *Xcase)
{
cuDoubleComplex *psi_d;
int NDX = *NX;
int NDY = *NY;
int NDZ = *NZ;
int NDIM= NDX*NDY*NDZ;
int dcase= *Xcase;
//----------------------------------------------------------------------------------------------
// printf("IN T GPU _Transform \n");
cufftHandle plan;
cudaMalloc( (void **)&psi_d, sizeof(cuDoubleComplex) * NDIM );
cudaMemcpy( psi_d, wtilde, sizeof(cuDoubleComplex)*NDIM, cudaMemcpyHostToDevice );
// printf("Kernel GPU _Transform \n");
// NOTE(review): if dcase is outside 1..3 no plan is created and
// cufftExecZ2Z runs on an uninitialized handle — confirm callers always
// pass 1, 2 or 3.
if (dcase == 1) cufftPlan1d(&plan, NDX, CUFFT_Z2Z, 1);
if (dcase == 2) cufftPlan2d(&plan, NDY, NDX, CUFFT_Z2Z);
if (dcase == 3) cufftPlan3d(&plan, NDZ, NDY, NDX, CUFFT_Z2Z);
cufftExecZ2Z(plan, psi_d, psi_d, CUFFT_FORWARD);
/* copy vectors from GPU to CPU */
cudaMemcpy(wtilde, psi_d, sizeof(cuDoubleComplex) * NDIM, cudaMemcpyDeviceToHost);
cufftDestroy(plan);
cudaFree(psi_d);
return;
}
|
5,938 | /*
* ABC.cpp
*
* Created on: 19 янв. 2016 г.
* Author: aleksandr
*/
#include "ABCTM.h"
#include <iostream>
// Constructor: allocates the four boundary-field history buffers (6 samples
// per boundary cell), computes the three update coefficients from the grid's
// S factor, uploads them to the device, and wires the four per-edge updaters
// to the field and history storage.
// NOTE(review): the coefficient formulas match a second-order absorbing
// boundary condition parameterized by S — confirm against the updater kernels.
ABCTM::ABCTM(GridTM* _grid) : EzLeft(_grid->sizeY*6, 0),
EzRight(_grid->sizeY*6, 0),
EzTop(_grid->sizeX*6, 0),
EzBottom(_grid->sizeX*6, 0),
coeff0(0),
coeff1(0),
coeff2(0),
grid(_grid),
coeffDevice(3, 0)
{
// temp2 = 1/S + 2 + S is the common denominator of all three coefficients
float temp1 = grid->S;
float temp2 = 1.0 / temp1 + 2.0 + temp1;
coeff0 = -(1.0 / temp1 - 2.0 + temp1) / temp2;
coeff1 = -2.0 * (temp1 - 1.0 / temp1) / temp2;
coeff2 = 4.0 * (temp1 + 1.0 / temp1) / temp2;
int sizeX = grid->sizeX;
int sizeY = grid->sizeY;
// stage coefficients on the host, then copy to the device-side vector
std::vector<float> coeffHost(3, 0);
coeffHost[0] = coeff0;
coeffHost[1] = coeff1;
coeffHost[2] = coeff2;
coeffDevice = coeffHost;
// each updater gets the field pointer, its own history buffer and the
// shared device coefficient array
leftUpdater.setParams(grid->Ez.getDevicePtr(),
EzLeft.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
rightUpdater.setParams(grid->Ez.getDevicePtr(),
EzRight.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
topUpdater.setParams(grid->Ez.getDevicePtr(),
EzTop.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
bottomUpdater.setParams(grid->Ez.getDevicePtr(),
EzBottom.getDevicePtr(),
coeffDevice.data(),
sizeX, sizeY);
std::cout << "Absorption boundary conditions initialized \n";
}
|
5,939 | #include "stdio.h"
// Enumerates all CUDA devices and prints their basic properties.
int main(void){
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    for(int i = 0 ; i < count ; i++){
        cudaGetDeviceProperties(&prop, i);
        printf("Name: %s\n", prop.name);
        printf("Compute capability: %d.%d\n", prop.major, prop.minor);
        printf("Clock rate: %d\n", prop.clockRate);
        // totalGlobalMem is size_t; "%d" truncated/garbled it on 64-bit builds
        printf("Total global mem: %zu\n", prop.totalGlobalMem);
        printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
    }
    return 0;
}
|
5,940 | // https://github.com/AlexDWong/dijkstra-CUDA
// REFER THE PROGRAM FROM HERE
|
5,941 | #include <stdio.h>
#define N 256
#define TPB 256
// Kernel: each thread prints its block-local thread index via device printf.
__global__ void cuda_hello(){
printf("Hello World! My threadId is %d\n", threadIdx.x);
}
// Launches N/TPB blocks of TPB threads; each thread greets with its id.
int main() {
    const int numBlocks = N / TPB;
    cuda_hello<<<numBlocks, TPB>>>();
    // wait for the kernel so its device-side printf output is flushed
    cudaDeviceSynchronize();
    return 0;
}
|
5,942 | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] over N doubles.
// One thread per element; the tid < N guard handles oversized launches.
__global__ void add(int N, double *a,double *b, double *c)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid < N)
{
c[tid] = a[tid]+b[tid];
}
} |
5,943 | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//
// kernel routine
//
// Kernel: stores each thread's block-local index into its global slot of x.
// No bounds guard: the caller (prac6) allocates exactly
// gridDim.x * blockDim.x floats, so tid is always in range.
__global__ void my_first_kernel(float *x)
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
x[tid] = threadIdx.x;
}
//
// CUDA routine to be called by main code
//
// Host driver callable from other translation units: allocates a
// (nblocks * nthreads)-element float array on host and device, runs
// my_first_kernel, copies the result back, prints it, and releases memory.
extern
int prac6(int nblocks, int nthreads)
{
    const int nsize = nblocks * nthreads;

    // host and device buffers of nsize floats
    float *host_buf = (float *) malloc(nsize * sizeof(float));
    float *dev_buf = NULL;
    cudaMalloc((void **) &dev_buf, nsize * sizeof(float));

    // run the kernel, then pull the results back to the host
    my_first_kernel<<<nblocks, nthreads>>>(dev_buf);
    cudaMemcpy(host_buf, dev_buf, nsize * sizeof(float), cudaMemcpyDeviceToHost);

    for (int n = 0; n < nsize; n++)
        printf(" n, x = %d %f \n", n, host_buf[n]);

    cudaFree(dev_buf);
    free(host_buf);
    return 0;
}
|
5,944 | #include <stdio.h>
#include <ctime>
#include <cassert>
#include <cmath>
#include <utility>
#include <vector>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Kernel: bins d_size points (SoA layout: x block, then y, then z) into a
// num_x * num_y * num_height voxel grid, writing the per-point cell indices
// into x_vec / y_vec / height.
// NOTE(review): coordinates are clamped to (-1, 1) below, yet the cell sizes
// are derived from max_length / max_height — confirm inputs are pre-normalized.
void __global__ point2gridmap(float* point, int* x_vec, int* y_vec, int* height, int d_size, int max_length, int max_height, int num_x, int num_y, int num_height)
{
int gid = threadIdx.x + blockDim.x*blockIdx.x;
if(gid >= d_size) return;
// cell extents along each axis (domain assumed to span [-max, +max])
float gap_x, gap_y, gap_height;
gap_x = 2.0 * (float)max_length / (float)num_x;
gap_y = 2.0 * (float)max_length/(float)num_y;
gap_height = 2.0 * (float)max_height/ (float)num_height;
float x, y, z;
x = point[gid];
y = point[gid + d_size];
z = point[gid + 2 * d_size];
// nudge exact zeros and clamp to the open interval (-1, 1) so that
// floor() below always yields a valid cell index
if(x == 0.0)
x = 0.0001;
if(y == 0.0)
y = 0.0001;
if(z == 0.0)
z = 0.0001;
if(x > 1.0)
x = 0.9999;
if(y > 1.0)
y = 0.9999;
if(z > 1.0)
z = 0.9999;
if(x < -1.0)
x = -0.9999;
if(y < -1.0)
y = -0.9999;
if(z < -1.0)
z = -0.9999;
// shift to [0, 2) and divide by the cell size to get integer indices
int idx_ring = floor((x + 1.0) / gap_x);
int idx_sector = floor((y + 1.0) / gap_y);
int idx_height = floor((z + 1.0) / gap_height);
height[gid] = idx_height;
x_vec[gid] = idx_ring;
y_vec[gid] = idx_sector;
// NOTE(review): this barrier is unnecessary (no shared memory) and threads
// that took the early return above never reach it — consider removing.
__syncthreads();
}
|
5,945 | #include "includes.h"
// Squares each input element: d_out[i] = d_in[i]^2.
// Indexing uses threadIdx.x only, so this assumes a single-block launch
// with one thread per element.
__global__ void square(float* d_out, float* d_in)
{
    const int tid = threadIdx.x;
    const float v = d_in[tid];
    d_out[tid] = v * v;
} |
5,946 | #include <cuda_runtime_api.h>
#include <iostream>
/*
Before you use your GPU to do work, you should know the
most essential things about its capabilities.
*/
// Prints the fundamental properties of the currently selected CUDA device,
// exiting with -1 when no CUDA device is present.
int main()
{
// Count CUDA-capable devices on the system
int numDevices;
cudaGetDeviceCount(&numDevices);
if (numDevices == 0)
{
std::cout << "You have no CUDA devices available!" << std::endl;
return -1;
}
// Get the ID of the currently selected active CUDA device
int device;
cudaGetDevice(&device);
// Fetch its properties
cudaDeviceProp props;
cudaGetDeviceProperties(&props, device);
/*
We only print the most fundamental properties here. cudaDeviceProp
contains a long range of indicators to check for different things
that your GPU may or may not support, as well as factors for
performance. However, the most essential property to know about is
the compute capability of the device.
*/
std::cout << "Model: " << props.name << std::endl;
std::cout << "Compute capability: " << props.major << "." << props.minor << std::endl;
// totalGlobalMem is in bytes; 1 << 30 converts to GiB
std::cout << "Memory: " << props.totalGlobalMem / float(1 << 30) << " GiB" << std::endl;
std::cout << "Multiprocessors: " << props.multiProcessorCount << std::endl;
// clockRate is reported in kHz; dividing by 1e6 yields GHz
std::cout << "Clock rate: " << props.clockRate / float(1'000'000) << " GHz" << std::endl;
return 0;
}
/*
Exercises:
1) Change the behavior such that the properties are not just printed for one, but all available CUDA devices you have!
(Even if you have just one)
2) Print a few more interesting properties and read up in the specification what they mean.
*/
|
5,947 | #include "includes.h"
// Standardizes each column of a row-major rows x columns matrix in place:
// subtracts the column mean and divides by sqrt of the summed squared
// deviations. Constant columns (variance 0) are instead scaled by 1/255.
// Columns are distributed over all threads grid-stride style; column 0 is
// never touched (the loop starts at tid+1, so thread 0 begins at column 1) —
// presumably it holds labels/ids; TODO confirm.
// NOTE(review): the divisor is sqrt(sum of squares), not sqrt(var/rows);
// confirm the unnormalized form is intended.
__global__ void cuda_standarization(float *data, int rows, int columns) {
    int total_threads_count = blockDim.x * gridDim.x;
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float var, ave, amo;
    for (int i = tid + 1; i < columns; i = i + total_threads_count) {
        amo = 0, var = 0;
        // column sum -> mean
        for (int j = 0; j < rows; ++j) {
            amo = amo + *(data + (j * columns) + i);
        }
        ave = amo / float(rows);
        // summed squared deviation from the mean
        for (int j = 0; j < rows; ++j) {
            float factor = *(data + (j * columns) + i) - ave;
            var = var + (factor * factor);
        }
        if (var == 0) {
            // constant column: avoid division by zero, rescale to [0,1]-ish
            // (float literal: keeps the kernel free of double arithmetic)
            for (int j = 0; j < rows; j++) {
                *(data + (j * columns) + i) = *(data + (j * columns) + i) / 255.0f;
            }
            continue;
        }
        // sqrtf/1.0f instead of sqrt/1. — the double-precision originals
        // forced fp64 arithmetic inside a float kernel
        float sd_reciprocal = 1.0f / sqrtf(var);
        for (int j = 0; j < rows; j++) {
            *(data + (j * columns) + i) = (*(data + (j * columns) + i) - ave) * sd_reciprocal;
        }
    }
} |
5,948 | #include "includes.h"
// 2D convolution of a padded grayscale image N (paddedH x paddedW, where the
// caller added paddingSize = (mask_size-1)/2 around the cols x rows output)
// with mask M, writing the unpadded result to g. One thread per output pixel.
// FIX: the original accumulated with `g[oPixelPos] += float`, which truncated
// the running sum to unsigned char on every iteration (and relied on g being
// pre-zeroed). Accumulate in a float register, clamp to [0,255], write once.
__global__ void convolution_global_memory_gray(unsigned char *N,float *M,unsigned char* g,std::size_t cols, std::size_t rows,std::size_t mask_size){
    int paddingSize = ( mask_size-1 )/2;
    unsigned int paddedH = cols + 2 * paddingSize;
    unsigned int paddedW = rows + 2 * paddingSize;
    // (i, j) index into the padded image; the offset skips the top/left pad
    int i = blockIdx.x * blockDim.x + threadIdx.x + paddingSize;
    int j = blockIdx.y * blockDim.y + threadIdx.y + paddingSize;
    if( (j >= paddingSize) && (j < paddedW-paddingSize) && (i >= paddingSize) && (i<paddedH-paddingSize)) {
        unsigned int oPixelPos = (j - paddingSize ) * cols + (i -paddingSize);
        float acc = 0.0f;  // full-precision accumulator
        for(int k = -paddingSize; k <= paddingSize; k++){
            for(int l = -paddingSize; l<=paddingSize; l++){
                unsigned int iPixelPos = (j+l)*cols+(i+k);
                unsigned int coefPos = (k + paddingSize) * mask_size + (l+ paddingSize);
                acc += N[iPixelPos] * M[coefPos];
            }
        }
        // saturate into the 8-bit output range before the single store
        acc = fminf(fmaxf(acc, 0.0f), 255.0f);
        g[oPixelPos] = (unsigned char)acc;
    }
} |
5,949 | #include <iostream>
#include <math.h>
#include <time.h>
using namespace std;
// Kernel: all-pairs shortest paths. Each thread runs a complete Dijkstra
// from one source vertex (grid-stride over sources) on the N x N adjacency
// matrix `graph`, writing row `src` of the distance matrix to hasil_gabung.
// NOTE(review): the per-thread work arrays are fixed at 110 entries, so
// N must be <= 110 or they overflow — confirm callers enforce this.
__global__
void dijkstra(int N, int *hasil_gabung, int *graph)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int src = index; src < N; src += stride){
const int N_const = N; // (unused)
int dist[110]; // change this too to match the value of N
int sptSet[110]; // change this too to match the value of N
// initialize: all distances infinite, no vertex finalized
for (int i = 0; i < N; i++)
dist[i] = INT_MAX, sptSet[i] = 0;
dist[src] = 0;
for (int count = 0; count < N - 1; count++) {
// pick the unfinalized vertex with the smallest tentative distance
int min = INT_MAX, min_index;
for (int v = 0; v < N; v++)
if (sptSet[v] == 0 && dist[v] <= min) min = dist[v], min_index = v;
int u = min_index;
sptSet[u] = 1;
// relax every neighbour of u
for (int v = 0; v < N; v++)
if (!sptSet[v] && graph[u*N+v] && dist[u] != INT_MAX
&& dist[u] + graph[u*N+v] < dist[v])
dist[v] = dist[u] + graph[u*N+v];
}
// publish this source's distance row
for (int i=0; i<N; i++) {
hasil_gabung[src*N+i] = dist[i];
}
}
}
// Builds a random symmetric weighted graph of N vertices (argv[1]), runs the
// all-pairs dijkstra kernel with the block size given in argv[2], prints the
// distance matrix and the elapsed time.
int main(int argc, char** argv)
{
    int N = stoi(argv[1]);
    // The kernel's per-thread dist/sptSet buffers are fixed at 110 entries;
    // a larger N silently overflowed them before this guard.
    if (N > 110) {
        cout << "N must be <= 110 (kernel uses fixed 110-element buffers)" << endl;
        return 1;
    }
    int *hasil_gabung;
    int *graph;
    cudaMallocManaged(&hasil_gabung, N*N*sizeof(int));
    cudaMallocManaged(&graph, N*N*sizeof(int));
    // deterministic random graph: zero diagonal, symmetric weights in [1, 22]
    srand(13517093);
    for(int i = 0;i<N;i++) {
        graph[i*N+i] = 0;
        for(int j = i+1;j<N;j++) {
            graph[i*N+j] = rand() % 23;
            if(graph[i*N+j] == 0) graph[i*N+j] = 1;
            graph[j*N+i] = graph[i*N+j];
        }
    }
    // (removed the unused `struct timeval start, end;` — it required
    // <sys/time.h>, which is not included)
    int blockSize = stoi(argv[2]);
    int numBlocks = (N + blockSize - 1) / blockSize;
    clock_t tStart = clock();
    dijkstra<<<numBlocks , blockSize>>>(N, hasil_gabung, graph);
    cudaDeviceSynchronize();  // kernel must finish before reading managed memory
    cout << "------DIJKSTRA-------" << endl;
    for (int i = 0;i < N;i++) {
        for (int j = 0;j < N; j++) {
            cout << hasil_gabung[i*N+j];
            if(j != N-1) {
                cout << " ";
            }
        }
        cout << endl;
    }
    printf("Time taken: %.2f microsekon\n", (double)(clock() - tStart)/CLOCKS_PER_SEC*1000000 );
    // https://www.geeksforgeeks.org/clock-function-in-c-c/
    // Free memory
    cudaFree(hasil_gabung);
    cudaFree(graph);
    return 0;
} |
5,950 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
// Tree-sum of one block's segment of d_in using only global memory.
// DESTRUCTIVE: overwrites d_in with partial sums. Thread 0 of each block
// writes the block's total to d_out[blockIdx.x].
// NOTE(review): for a non-power-of-two blockDim.x the halving loop drops the
// odd element each step (the comment below shows 50,25,12,... for 100), losing
// data — the callers in this file always use power-of-two block sizes.
__global__ void globalMem_reduce_kernel(float *d_out, float *d_in) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int thrId = threadIdx.x;
// reduction in global memory
// loop gives 50, 25, 12, 6, 3, 1 for blockDim.x = 100
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (thrId < s) { // if threadId.x is on the left half
d_in[myId] += d_in[myId + s];
}
__syncthreads();
}
// thread 0 writes result of this block to global memory
if (thrId == 0) {
d_out[blockIdx.x] = d_in[myId];
}
}
// Tree-sum of one block's segment of d_in staged through dynamic shared
// memory (blockDim.x * sizeof(float), passed as the 3rd launch argument).
// Unlike the global-memory version, d_in is left untouched. Thread 0 writes
// the block's total to d_out[blockIdx.x].
__global__ void sharedMem_reduce_kernel(float *d_out, float *d_in) {
// shared_data is allocated in the kernel call: 3rd argument
extern __shared__ float shared_data[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int thrId = threadIdx.x;
// load shared memory from global memory
shared_data[thrId] = d_in[myId];
__syncthreads();
// do reduction in SHARED memory
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (thrId < s) {
shared_data[thrId] += shared_data[thrId + s];
}
__syncthreads();
}
// thread 0 writes result of this block from shared to global memory
if (thrId == 0) {
d_out[blockIdx.x] = shared_data[0];
}
}
// Two-stage sum reduction of d_in[0..size) into d_out[0].
// Stage 1 produces one partial sum per block in d_intermediate; stage 2
// reduces those partials with a single block whose thread count equals the
// stage-1 block count. The assumptions below imply blocks <= 1024 and (for
// the halving loops to be lossless) that size is a power-of-two multiple
// of maxThreadsPerBlock.
void reduce (float *d_out, float *d_intermediate, float *d_in, int size, bool useSharedMem) {
// assumption 1: size is not greater than maxThreadsPerBlock ** 2
// assumption 2: size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (useSharedMem) {
sharedMem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_intermediate, d_in);
} else {
globalMem_reduce_kernel<<<blocks, threads>>>(d_intermediate, d_in);
}
// now we are down to one block, reduce it
threads = blocks; // each block wrote one number into d_intermediate
blocks = 1;
if (useSharedMem) {
sharedMem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_out, d_intermediate);
} else {
globalMem_reduce_kernel<<<blocks, threads>>>(d_out, d_intermediate);
}
}
// Benchmarks the global-memory vs shared-memory reduction (argv[1]: 0 or 1)
// over 1M random floats, averaging 100 timed runs, and compares the device
// sum against a host-computed reference.
int main(int argc, char* argv[]) {
    // --- Checking whether there is a device --- //
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        std::cerr << "No GPUs found" << std::endl;
        exit(EXIT_FAILURE);
    }
    // --- Properties of device --- //
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProps;
    // explicit cudaSuccess comparison (was an implicit "== 0")
    if (cudaGetDeviceProperties(&deviceProps, dev) == cudaSuccess) {
        std::cout << "Using device " << dev << std::endl;
        std::cout << deviceProps.name << std::endl;
        std::cout << "Global memory: " << deviceProps.totalGlobalMem << std::endl;
        // typo fixed: was "Comoute v:"
        std::cout << "Compute v: " << static_cast<int>(deviceProps.major) << "." <<
            static_cast<int>(deviceProps.minor) << std::endl;
        std::cout << "Clock: " << static_cast<int>(deviceProps.clockRate) << std::endl;
    }
    // --- Actual task - Reducing a sequence of numbers with op "+" --- //
    const int ARRAY_SIZE = 1 << 20;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // generate input array on host; static storage — 4 MiB as a local
    // variable risked overflowing the default stack
    static float h_in[ARRAY_SIZE];
    float sum = 0.0f;
    for (int i = 0; i < ARRAY_SIZE; i++) {
        // uniform in roughly [-1, 1]
        h_in[i] = -1.0f + static_cast<float>(random())/(static_cast<float>(RAND_MAX)/2.0f);
        sum += h_in[i];
    }
    // declare device pointers
    float *d_in, *d_intermediate, *d_out;
    // allocate device memory
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
    cudaMalloc((void **) &d_out, sizeof(float));
    // transfer input array to device
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    int whichKernel = 0;
    if (argc == 2) {
        whichKernel = atoi(argv[1]);
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // launch one of the kernels, with or without shared memory
    switch (whichKernel) {
    case 0:
        std::cout << "Running global reduce" << std::endl;
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++) {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
        }
        cudaEventRecord(stop, 0);
        break;
    case 1:
        std::cout << "Running shared reduce" << std::endl;
        cudaEventRecord(start, 0);
        for (int i = 0; i < 100; i++) {
            reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
        }
        cudaEventRecord(stop, 0);
        break;
    default:
        std::cerr << "No kernel run!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // calculate elapsed time (cudaEventSynchronize blocks until `stop` fires)
    cudaEventSynchronize(stop);
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, stop);
    elapsed /= 100.0f; // averaged over 100 trials
    // copy back the result to host
    float h_out;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "Average time elapsed: " << elapsed << std::endl;
    std::cout << "Host result: " << sum << ", device result: " << h_out << std::endl;
    // release events (previously leaked) and device memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(d_out);
    return 0;
}
|
5,951 | //PROGRAMA QUE SUMA DOS VECTORES (a y b) Y ALMACENA EL RESULTADO EN EL VECTOR (c)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 1000
// Element-wise sum c[i] = a[i] + b[i] for a single-block launch of N threads;
// the guard protects against launches wider than N.
__global__ void add(int *a, int *b, int *c)
{
    const int i = threadIdx.x;  // one thread per element
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Fills two N-element vectors on the CPU, adds them on the GPU with a
// single block of N threads, and prints every a[i] + b[i] = c[i] line.
int main()
{
int a[N], b[N], c[N],i,j;
int *dev_a, *dev_b, *dev_c;
// Allocate memory on the GPU
cudaMalloc((void**)&dev_a,N*sizeof(int));
cudaMalloc((void**)&dev_b,N*sizeof(int));
cudaMalloc((void**)&dev_c,N*sizeof(int));
// Fill arrays 'a' and 'b' on the CPU
for(i=0; i<N;i++)
{
a[i]=i+1;
b[i]=i*i;
}
// Copy arrays 'a' and 'b' to the GPU
cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,N*sizeof(int),cudaMemcpyHostToDevice);
// Launch the kernel (one block of N threads; the commented line is the
// one-thread-per-block alternative)
//add<<<N,1>>>(dev_a,dev_b,dev_c);
add<<<1,N>>>(dev_a,dev_b,dev_c);
// Copy array 'c' from the GPU back to the CPU (this blocking copy also
// synchronizes with the kernel)
cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost);
// Print the results
for(j=0;j<N;j++)
{
printf("%d + %d = %d\n",a[j],b[j],c[j]);
}
// Free the memory reserved on the GPU
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
5,952 | #include "includes.h"
// Backward pass: dZ = dA * A * (1 - A), elementwise over a matrix stored
// row-major with nColsdZ columns. Launch: one block per column (blockIdx.x),
// one thread per row (threadIdx.x).
// NOTE(review): A*(1-A) is the diagonal, sigmoid-style derivative, not the
// full softmax Jacobian — confirm this simplification is intended here.
__global__ void BackwardSoftmax(float *A, float *dA, int nColsdZ, float *dZ)
{
int row = threadIdx.x;
int col = blockIdx.x;
dZ[row * nColsdZ + col] = dA[row * nColsdZ + col] * A[row * nColsdZ + col] *
(1 - A[row * nColsdZ + col]);
} |
5,953 | #include "includes.h"
// No-op placeholder kernel — presumably a stub for radiation work; TODO confirm.
__global__ void cuda_radiation_kernel() {} |
5,954 | #include "includes.h"
// Kernel: for each detected point (one of ChooseBestN candidates per row of
// each of `size` images), searches up to ConnectRadius rows downward for
// other valid candidates within a squared-distance threshold, recording the
// squared distance in the 5-D ConnectStatus table. One thread per candidate.
// (Original comments were in Chinese; translated to English.)
__global__ static void ConnectPointsStatus(int* PointType_BestN, int* ConnectStatus, int size, int rows, int ChooseBestN, int ConnectRadius)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= size * rows * ChooseBestN) // bounds check against the total candidate count
return;
// decompose the flat id into (image, row, candidate) indices
int sizeIndex = id / (rows * ChooseBestN);
int tempID = id % (rows * ChooseBestN);
int rowIndex = tempID / ChooseBestN;
int chooseIndex = tempID % ChooseBestN;
// -1 means this slot holds no valid point
if (PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] == -1)
return;
// valid point: chase candidates in the rows below
int finalPos = min(rowIndex + ConnectRadius, rows); // stop condition
for (int i = rowIndex + 1; i < finalPos; i++)
{
for (int j = 0; j < ChooseBestN; j++)
{
// candidate j of row i is the potential next point;
// only consider it if it is valid
if (PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j] != -1)
{
// first term: the current point; second term: the downstream point
int diffX = PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] -
PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j];
int diffY = i - rowIndex;
int Radius = diffX * diffX + diffY * diffY;
// radius slot 0 is never used
if (Radius < ConnectRadius * ConnectRadius)
{
// image offset + row offset + current Top-N offset + radius offset + downstream Top-N slot
int index = sizeIndex * rows * ChooseBestN * ConnectRadius * ChooseBestN + // image
rowIndex * ChooseBestN * ConnectRadius * ChooseBestN + // row
chooseIndex * ConnectRadius * ChooseBestN + // current Top-N
(i - rowIndex) * ChooseBestN + // radius (row distance)
j;
ConnectStatus[index] = Radius;
}
}
}
}
} |
5,955 | #include<stdio.h>
#include<iostream>
using namespace std;
// Prints selected hardware properties of CUDA device 0.
int main(int argc, char* argv[]){
cudaDeviceProp property;
// query device 0 only; no error checking on the call
cudaGetDeviceProperties(&property, 0);
cout << property.name << endl;
cout << property.major << endl;
cout << property.minor << endl;
cout << property.totalGlobalMem << endl;
cout << property.clockRate << endl;
cout << "sharedMem" << endl;
cout << property.sharedMemPerBlock << endl;
cout << property.regsPerBlock << endl;
cout << "warpSize" << endl;
cout << property.warpSize << endl;
cout << "Maximum thread" << endl;
cout << property.maxThreadsPerMultiProcessor << endl;
cout << "number of MP" << endl;
cout << property.multiProcessorCount<< endl;
return 0;
}
|
5,956 | #include "includes.h"
// Backward pass for SGA aggregation weights (right/horizontal direction).
// Accumulates gradients for the wsize per-pixel filter weights from
// temp_diff (gradient of the aggregated output), bottom_data (raw input
// volume) and top_data (aggregated output). n = height*width pixels per
// slice; one thread per pixel; data is laid out in depth-major planes of
// `step` = height*width elements.
// NOTE(review): neighbours are read at column-1/column-2 of top_data, i.e.
// the forward aggregation ran left-to-right — confirm against the forward op.
__global__ void sga_right_weight_backward (const int n, const float *bottom_data, const float *top_data, const float *temp_diff, const int height, const int width, const int depth, const int wsize, float *filters_diff){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
int step = height * width;
// base: start of this pixel's depth column in the data tensors
int base = index / step * step * depth + index % step; //up->down
// fbase: start of this pixel's weight-gradient column (wsize planes)
int fbase = index / step * step * wsize + index % step;
// int row = index%step/width;
int col = index % step % width;
// weight 0: gradient of the "self" term, summed over depth
for (int i = 0; i < depth; i++)
filters_diff[fbase] +=
temp_diff[base + i * step] * bottom_data[base + i * step];
if (col - 1 >= 0)
{
// weight 1: same-depth neighbour one column to the left
int location = fbase + step;
for (int i = 0; i < depth; i++)
filters_diff[location] +=
temp_diff[base + i * step] * top_data[base + i * step - 1];
// weight 3: left neighbour shifted one depth plane up; depth 0 falls
// back to bottom_data at the pixel itself
location = fbase + 3 * step;
filters_diff[location] += temp_diff[base] * bottom_data[base];
for (int i = 1; i < depth; i++)
filters_diff[location] +=
temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1];
// weight 4: left neighbour shifted one depth plane down; the last depth
// falls back to bottom_data at the pixel itself
location = fbase + 4 * step;
filters_diff[location] +=
temp_diff[base + (depth - 1) * step] * bottom_data[base + (depth - 1) * step];
for (int i = 0; i < depth - 1; i++)
filters_diff[location] +=
temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1];
}
/*
else{
//int location = fbase + step;
for(int i=0; i<depth; i++){
float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
}
// filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
// location = fbase + 3*step;
// for(int i=0; i<depth; i++)
// filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
//
// location = fbase + 4*step;
// for(int i=0; i<depth; i++)
// filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
}*/
if (col - 2 >= 0)
{
// weight 2: same-depth neighbour two columns to the left
int location = fbase + 2 * step;
for (int i = 0; i < depth; i++)
filters_diff[location] +=
temp_diff[base + i * step] * top_data[base + i * step - 2];
} /*
else{
int location = fbase + 2*step;
for(int i=0; i<depth; i++)
filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
} */
} |
// Stub kernel: intended to compute per-block velocity magnitudes from the
// level set and the x/y velocity fields into blockMags; body not implemented.
__global__
void velocityMagnitude(float * blockMags,
const float * d_levelset,
const float * d_velIn_x,
const float * d_velIn_y)
{
}
// Host wrapper: launches the (currently empty) velocityMagnitude kernel with
// the supplied grid/block configuration. No synchronization or error check.
void velocityMagnitude(dim3 blocks, dim3 threads, float * blockMags,
const float * d_levelset,
const float * d_velIn_x,
const float * d_velIn_y)
{
velocityMagnitude<<<blocks,threads>>>
(blockMags, d_levelset, d_velIn_x,
d_velIn_y);
}
|
5,958 | #include "median_tree.cuh" |
5,959 | #include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/times.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#define PI 3.14159265358979323846
#define FactorArcosegRad 0.00000484814
#define BLOQUESIZE 4
clock_t timestart, timeend;
/**
 @brief Converts a value expressed in arc seconds to radians.
 @param deltax numeric value in arc seconds
 @returns the equivalent value in radians */
float arcoseg_radian(float deltax){
    return deltax * FactorArcosegRad;
}
/**
 @brief Reads `tamano` visibilities (4 doubles each: U, V, real, imaginary)
        from an already-opened binary file.
 @param archivo: open file to read from
 @param tamano: number of visibilities to read
 @returns heap-allocated array of 4*tamano doubles (caller frees); exits on
          allocation or short-read failure (both were silently ignored before) */
double* readFile(FILE* archivo, int tamano){
    double* elementos = (double*) malloc(sizeof(double)*4*tamano);
    if (elementos == NULL){
        fprintf(stderr, "readFile: sin memoria\n");
        exit(1);
    }
    // fread(ptr, size, count, ...): element size first, then count — the
    // original had them swapped, which worked but broke the return check
    size_t leidos = fread(elementos, sizeof(double), (size_t)tamano*4, archivo);
    if (leidos != (size_t)tamano*4){
        fprintf(stderr, "readFile: lectura incompleta\n");
        exit(1);
    }
    return elementos;
}
/**
@brief Función ejecuta el proceso de gridding
@param U: Valores de la coordenada U en el plano de Fourier
@param V: Valores de la coordenada V en el plano de Fourier
@param R: Valores reales de la visibilidad en el plano de Fourier
@param I: Valores imaginarios la visibilidad en el plano de Fourier
@param num_datos: Cantidad de visibilidades ingresadas o dimensión de los vectores anteriores
@param tamano: Lado de la matriz a construir, si tamano es 512 se construye una matriz de 512X512
@param V: Valores de la coordenada V en el plano de Fourier
@param deltaU: Valor delta necesario para determinar la vecindad de cada pixel de la grilla regular
@param r: vector de valores reales de la salida del proceso de gridding
@param k: vector de valores imaginarios de la salida del proceso de gridding
@returns */
// Kernel: grids num_datos visibilities onto a tamano x tamano regular grid.
// One thread per visibility; contributions landing in the same cell are
// combined with atomicAdd into the real (r) and imaginary (k) planes.
__global__ void gridding_process(float *U, float *V, float *R, float *I, int num_datos, int tamano, float deltaU, float *r, float *k)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<num_datos)
{
float x, y, modx, mody;
// continuous grid coordinates, centred at tamano/2
x = U[i]/deltaU+tamano/2;
y = V[i]/deltaU+tamano/2;
// NOTE(review): modx mixes the raw U[i] with the already-centred x, so
// modx = -(tamano/2)*deltaU rather than a fractional remainder — the
// intent looks like nearest-cell rounding; verify this computation.
modx = U[i] - x*deltaU;
mody = V[i] - y*deltaU;
if(modx>deltaU/2){
x+=1;
}
if (mody>deltaU/2)
{
y+=1;
}
// discard visibilities falling outside the grid
if ((int)x<tamano && (int)y<tamano)
{
// atomics: many visibilities may map to the same cell
atomicAdd(&r[(int)y*tamano+(int)x], R[i]);
atomicAdd(&k[(int)y*tamano+(int)x], I[i]);
}
}
}
// Rounds v up to the next power of two via bit smearing: after the shift
// loop every bit below the top bit of (v-1) is set, so adding one yields
// the next power of two. Returns v unchanged when it is already a power of
// two, and 0 for v == 0 (wrap-around), matching the original unrolled form.
__host__ unsigned long upper_power_of_two(unsigned long v)
{
    v--;
    for (int shift = 1; shift <= 16; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
// Gridding driver: parses -i input -z count -d deltaX(arcsec) -N size -o out,
// reads the visibilities, runs gridding_process on the GPU and writes the
// real/imaginary planes to "<out>real.raw" / "<out>img.raw".
int main(int argc, char * const argv[])
{
    // initialized to 0 so the validation below is well-defined even when an
    // option is missing (previously these were read uninitialized)
    int tamano = 0;            // grid side (output is tamano x tamano)
    int numdatos = 0;          // number of visibilities
    float deltaX_arcoseg = 0.0f, deltaX_radian;
    float deltaU;
    char* archivo_entrada=NULL;
    char* archivo_salida=NULL;
    int i, c;
    opterr = 0;
    while ((c = getopt (argc, argv, "i:z:d:N:o:")) != -1)
        switch (c)
        {
        case 'i':
            archivo_entrada = optarg;
            break;
        case 'z':
            numdatos = atoi(optarg);
            break;
        case 'd':
            deltaX_arcoseg = atof(optarg);
            break;
        case 'N':
            tamano = atoi(optarg);
            break;
        case 'o':
            archivo_salida = optarg;
            break;
        case '?':
            if (optopt == 'i' ||optopt == 'z' ||optopt == 'd'||optopt == 'N' ||optopt == 'o')
                fprintf (stderr, "Opcion -%c requiere un argumento.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Opcion desconocida `-%c'.\n", optopt);
            else
                fprintf (stderr,
                         "Carater opcion desconocido `\\x%x'.\n",
                         optopt);
            return 1;
        default:
            abort ();
        }
    /* Input validation: positive sizes and non-null paths. Missing paths now
       exit instead of falling through to fopen(NULL)/strcat crashes. */
    if(tamano<=0){
        printf("El parametro -N debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(numdatos<=0){
        printf("El parametro -z debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(deltaX_arcoseg==0){
        printf("El parametro -d debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(archivo_entrada==NULL){
        printf("Debe especificarse un archivo de entrada\n");
        exit(1);
    }
    if(archivo_salida==NULL){
        printf("Debe especificarse un archivo de salida\n");
        exit(1);
    }
    // arc seconds -> radians, then the U/V cell size
    deltaX_radian = arcoseg_radian(deltaX_arcoseg);
    deltaU = 1/(tamano*deltaX_radian);
    timestart = clock();
    // "rb": the file is binary; text mode breaks on newline-translating platforms
    FILE *entrada = fopen(archivo_entrada,"rb");
    if (entrada == NULL){
        printf("No se pudo abrir el archivo de entrada\n");
        exit(1);
    }
    double* data = readFile(entrada,numdatos);
    fclose(entrada);
    // host arrays: coordinates, real/imaginary inputs, gridded output planes
    float *X = (float*)malloc(sizeof(float)*numdatos);
    float *Y = (float*)malloc(sizeof(float)*numdatos);
    float *R = (float*)malloc(sizeof(float)*numdatos);
    float *I = (float*)malloc(sizeof(float)*numdatos);
    float *r = (float*)malloc(sizeof(float)*tamano*tamano);
    float *k = (float*)malloc(sizeof(float)*tamano*tamano);
    // unpack the column-blocked input (U block, V block, R block, I block)
    for (i = 0; i < numdatos; i++)
    {
        X[i] = (float)data[i];
        Y[i] = (float)data[i+numdatos];
        R[i] = (float)data[i+2*numdatos];
        I[i] = (float)data[i+3*numdatos];
    }
    for (i = 0; i < tamano*tamano; ++i)
    {
        r[i] = 0;
        k[i] = 0;
    }
    // device buffers
    float *C_X;
    float *C_Y;
    float *C_R;
    float *C_I;
    float *C_r;
    float *C_k;
    cudaMalloc( (void**)&C_X, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_Y, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_R, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_I, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_r, tamano*tamano*sizeof(float));
    cudaMalloc( (void**)&C_k, tamano*tamano*sizeof(float));
    cudaMemcpy( C_X, X, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_Y, Y, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_R, R, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_I, I, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_r, r, tamano*tamano*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_k, k, tamano*tamano*sizeof(float), cudaMemcpyHostToDevice);
    // launch geometry: round the data count up to a power of two, then at
    // least one block (numdatos < BLOQUESIZE previously yielded a 0-block grid)
    long data_size_2 = upper_power_of_two(numdatos);
    long nbloques = data_size_2 / BLOQUESIZE;
    if (nbloques < 1)
        nbloques = 1;
    dim3 dimBlock(BLOQUESIZE, 1);
    dim3 dimGrid(nbloques, 1);
    gridding_process<<<dimGrid, dimBlock>>>(C_X, C_Y, C_R, C_I, numdatos, tamano, deltaU, C_r, C_k);
    cudaDeviceSynchronize();
    // results back to the host, then release device memory
    cudaMemcpy( r, C_r, tamano*tamano*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy( k, C_k, tamano*tamano*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree( C_X );
    cudaFree( C_Y );
    cudaFree( C_R );
    cudaFree( C_I );
    cudaFree( C_r );
    cudaFree( C_k );
    // Build the output names with snprintf into bounded buffers. The old code
    // malloc'ed sizeof(char*)*2 = 16 bytes and strcat'ed suffixes into the
    // argv string itself — both were buffer overflows.
    char nombre_real[1024];
    char nombre_img[1024];
    snprintf(nombre_real, sizeof nombre_real, "%sreal.raw", archivo_salida);
    snprintf(nombre_img, sizeof nombre_img, "%simg.raw", archivo_salida);
    FILE *f = fopen(nombre_real,"wb");
    FILE *g = fopen(nombre_img,"wb");
    if (f == NULL || g == NULL){
        printf("No se pudo abrir el archivo de salida\n");
        exit(1);
    }
    fwrite(r, tamano*tamano, sizeof(float),f);
    fwrite(k, tamano*tamano, sizeof(float),g);
    fclose(f);
    fclose(g);
    // release host memory (previously leaked)
    free(data);
    free(X);
    free(Y);
    free(R);
    free(I);
    free(r);
    free(k);
    timeend = clock();
    printf("Total = %f\n", (double) (timeend-timestart)/(double)CLOCKS_PER_SEC);
    return EXIT_SUCCESS;
} |
5,960 | // reference: https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
// __global__ specifies a kernel in CUDA. It specifies that
// this function runs on the GPU but can be called from the
// CPU. Code that runs on the GPU is called device code.
// code that runs on the CPU is called host code.
// Kernel: element-wise y[i] = x[i] + y[i] over n floats. The grid-stride
// loop lets any <<<blocks, threads>>> configuration cover the whole array:
// each thread starts at its global index and advances by the total number
// of launched threads.
__global__ void add(int n, float* x, float* y) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        y[i] = x[i] + y[i];
    }
}
// Kernel: initializes x to 1.0f and y to 2.0f on the GPU. Doing this on the
// device (instead of the host) keeps the managed pages resident on the GPU,
// avoiding page faults when the add kernel runs next. Grid-stride loop, so
// any launch configuration covers all n elements.
__global__ void init(int n, float* x, float* y) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
}
// Adds two 1M-element vectors in CUDA unified memory, timing the add kernel
// with CUDA events and verifying every element equals 3.0f.
int main()
{
    int N = 1 << 20; // 1M elements
    // Unified (managed) memory: one pointer usable from both host and device.
    float* x;
    float* y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    // Launch configuration: 256 threads per block, enough blocks to cover N.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    // Events used to measure kernel execution time.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Initialize x/y on the GPU so the managed pages are device-resident
    // before the timed add kernel (avoids page-fault overhead).
    init <<<numBlocks, blockSize >>> (N, x, y);
    // Synchronous error = bad launch config; asynchronous error = fault during
    // execution, surfaced by cudaDeviceSynchronize. The sync also guarantees
    // init has finished before we start timing add.
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess) {
        printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
    }
    if (errAsync != cudaSuccess) {
        printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
    }
    // Time the main calculation.
    cudaEventRecord(start);
    add<<<numBlocks, blockSize>>>(N, x, y);
    cudaEventRecord(stop);
    // Block until the stop event fires: this both synchronizes with the
    // kernel (required before the CPU reads y) and makes the timing valid.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Check for errors (all values should be 3.0f).
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl
        << "Elapsed Time: " << milliseconds << std::endl;
    // Destroy the events (previously leaked) and free unified memory.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
5,961 | #include "includes.h"
// Canny edge detection: non-maximum suppression. Keeps a pixel's
// gradient magnitude only when it is a local maximum along its
// quantized gradient direction (theta in {0, 45, 90, 135}); otherwise
// writes 0. Each block stages an (L_SIZE+2)^2 tile of `data` (with a
// one-pixel halo) into dynamically allocated shared memory.
// NOTE(review): L_SIZE comes from blockDim.x but is also used as the
// tile's row count — this assumes a square thread block; confirm at
// the launch site. Launch must also exclude the outermost image ring
// (indices here are offset by +1).
__global__ void non_max_supp_kernel(unsigned char *data, unsigned char *out, unsigned char *theta, int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
// These variables are offset by one to avoid seg. fault errors
// As such, this kernel ignores the outside ring of pixels
const int L_SIZE = blockDim.x;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to l_data: each thread loads its own pixel; edge threads of
// the block additionally load the halo ring below.
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + cols + 1];
}
// left and right halo columns
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + 1];
// Barrier: all tile + halo loads must complete before any thread
// reads its neighbors from shared memory.
__syncthreads();
unsigned char my_magnitude = l_data[l_row * (L_SIZE + 2) + l_col];
// The following variables are used to address the matrices more easily
switch(theta[pos]) {
// A gradient angle of 0 degrees = an edge that is North/South
// Check neighbors to the East and West
case 0:
// supress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col + 1] || // east
my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col - 1]) // west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 45 degrees = an edge that is NW/SE
// Check neighbors to the NE and SW
case 45:
// supress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col + 1] || // north east
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col - 1]) // south west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 90 degrees = an edge that is E/W
// Check neighbors to the North and South
case 90:
// supress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col] || // north
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col]) // south
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 135 degrees = an edge that is NE/SW
// Check neighbors to the NW and SE
case 135:
// supress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col - 1] || // north west
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col + 1]) // south east
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// Unrecognized angle: pass the magnitude through unchanged.
default: out[pos] = my_magnitude; break;
}
} |
5,962 | /*
* Author: Kasjan Siwek
*
* Application simulates NxN masses connected by springs. At time 0 we place
* M charges in the system. Each charge causes nearby masses (those that
* are in radius R_m from the charge) to instantly travel to the middle
* of said charge. Those masses then stay there infinetely. We then
* simulate how the rest of the system behaves till it stops (due to
* friction).
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <iostream>
#include <cfloat>
#include <stdio.h>
#define BLOCK_SIZE 16
#define GAMMA 0.999
#define DT 0.01
#define EPSILON 1e-3
// Simulates one step of simulation
__global__ void simulate(int N, float K, float *vel_x, float *vel_y, float *pos_x, float *pos_y, float *b_vel_x, float *b_vel_y, float *b_pos_x, float *b_pos_y, int * last_ch) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
float f_x = 0;
float f_y = 0;
if (row < N - 1 && col < N - 1
&& row != 0 && col != 0
&& vel_x[row * N + col]!= FLT_MAX
&& vel_y[row * N + col] != FLT_MAX) {
float p_xl = pos_x[(row - 1) * N + col];
float p_xb = pos_x[row * N + col - 1];
float p_x = pos_x[row * N + col];
float p_xt = pos_x[row * N + col + 1];
float p_xr = pos_x[(row + 1) * N + col];
float p_yl = pos_y[(row - 1) * N + col];
float p_yb = pos_y[row * N + col - 1];
float p_y = pos_y[row * N + col];
float p_yt = pos_y[row * N + col + 1];
float p_yr = pos_y[(row + 1) * N + col];
float v_x = vel_x[row * N + col];
float v_y = vel_y[row * N + col];
b_pos_x[row * N + col] = p_x + v_x * DT;
b_pos_y[row * N + col] = p_y + v_y * DT;
f_x = K * (p_xl + p_xr + p_xt + p_xb - 4 * p_x);
f_y = K * (p_yl + p_yr + p_yt + p_yb - 4 * p_y);
b_vel_x[row * N + col] = v_x * GAMMA + f_x * DT;
b_vel_y[row * N + col] = v_y * GAMMA + f_y * DT;
}
if ((abs(f_x) > EPSILON || abs(f_y) > EPSILON)) {
*last_ch += 1;
}
}
// Host driver: reads N, M, K and the M charges from stdin, builds the
// initial state, ping-pongs two device buffer sets through `simulate`
// until no mass feels a force above EPSILON, then prints positions.
int main() {
cudaError_t err = cudaSuccess;
int N, M;
float K;
std::cin >> N >> M >> K;
int numOfElements = N * N;
size_t size = numOfElements * sizeof(float);
// Malloc on host
float *h_velocity_x = (float *)malloc(size);
float *h_velocity_y = (float *)malloc(size);
float *h_position_x = (float *)malloc(size);
float *h_position_y = (float *)malloc(size);
// Initialization: mass i sits at integer lattice point (x, y), at rest.
for (int i = 0; i < numOfElements; ++i) {
int x = i / N;
int y = i % N;
h_position_x[i] = x;
h_position_y[i] = y;
h_velocity_x[i] = 0;
h_velocity_y[i] = 0;
// I assume that neither mass is gonna reach FLT_MAX velocity,
// so I save that value for static masses
if (x == 0 || y == 0) {
h_velocity_x[i] = FLT_MAX;
h_velocity_y[i] = FLT_MAX;
}
}
// Initializing charges: every mass within radius R of (X, Y) snaps
// to the charge centre and becomes static (FLT_MAX sentinel).
for (int k = 0; k < M; ++k) {
float X, Y, R;
std::cin >> X >> Y >> R;
for (int i = 0; i < numOfElements; ++i) {
int x = i / N;
int y = i % N;
if ( (X-x)*(X-x) + (Y-y)*(Y-y) <= R*R ) {
h_position_x[i] = X;
h_position_y[i] = Y;
h_velocity_x[i] = FLT_MAX;
h_velocity_y[i] = FLT_MAX;
}
}
}
// Device malloc: two full buffer sets for ping-pong stepping.
float *d_velocity_x = NULL;
cudaMalloc((void **)&d_velocity_x, size);
float *d_velocity_y = NULL;
cudaMalloc((void **)&d_velocity_y, size);
float *d_position_x = NULL;
cudaMalloc((void **)&d_position_x, size);
float *d_position_y = NULL;
cudaMalloc((void **)&d_position_y, size);
float *d_velocity_x2 = NULL;
cudaMalloc((void **)&d_velocity_x2, size);
float *d_velocity_y2 = NULL;
cudaMalloc((void **)&d_velocity_y2, size);
float *d_position_x2 = NULL;
cudaMalloc((void **)&d_position_x2, size);
float *d_position_y2 = NULL;
cudaMalloc((void **)&d_position_y2, size);
int *counter = NULL;
cudaMalloc((void **)&counter, sizeof(int));
// Initialize eps counter (non-zero so the first loop iteration's
// delta test does not terminate immediately).
int aux_cnt = 1;
cudaMemcpy(counter, &aux_cnt, sizeof(int), cudaMemcpyHostToDevice);
// Copy data to device (both buffer sets start identical so the
// untouched border/pinned entries are valid in either set).
cudaMemcpy(d_velocity_x, h_velocity_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_y, h_velocity_y, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_x, h_position_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_y, h_position_y, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_x2, h_velocity_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_velocity_y2, h_velocity_y, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_x2, h_position_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_position_y2, h_position_y, size, cudaMemcpyHostToDevice);
// Invoke kernel. NOTE(review): the block is 16x32 — simulate() uses
// blockDim.x for columns and blockDim.y for rows; verify this matches
// the intended aspect.
dim3 dimBlock(BLOCK_SIZE, 2 * BLOCK_SIZE);
dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
bool parity = true;
float *vel_x, *b_vel_x, *vel_y, *b_vel_y, *pos_x, *b_pos_x, *pos_y, *b_pos_y;
int change;
int previous = 0;
int last_change;
// Step until the monotone counter stops growing, i.e. no thread
// reported a force above EPSILON during the last step.
do {
vel_x = parity ? d_velocity_x : d_velocity_x2;
vel_y = parity ? d_velocity_y : d_velocity_y2;
pos_x = parity ? d_position_x : d_position_x2;
pos_y = parity ? d_position_y : d_position_y2;
b_vel_x = parity ? d_velocity_x2 : d_velocity_x;
b_vel_y = parity ? d_velocity_y2 : d_velocity_y;
b_pos_x = parity ? d_position_x2 : d_position_x;
b_pos_y = parity ? d_position_y2 : d_position_y;
parity = parity ? false : true;
simulate<<<dimGrid, dimBlock>>>(N, K, vel_x, vel_y, pos_x, pos_y, b_vel_x, b_vel_y, b_pos_x, b_pos_y, counter);
// This blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(&last_change, counter, sizeof(int), cudaMemcpyDeviceToHost);
change = last_change - previous;
previous = last_change;
} while (change);
cudaDeviceSynchronize();
// Errors are only checked once, after the loop; an earlier failure
// would surface here as a sticky error.
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch simulate kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy data back to host: parity was flipped before the last launch,
// so the buffers selected here are the ones most recently written.
cudaMemcpy(h_velocity_x, parity ? d_velocity_x2 : d_velocity_x, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_velocity_y, parity ? d_velocity_y2 : d_velocity_y, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_x, parity ? d_position_x2 : d_position_x, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_position_y, parity ? d_position_y2 : d_position_y, size, cudaMemcpyDeviceToHost);
// Free memory on device
cudaFree(d_velocity_x);
cudaFree(d_velocity_y);
cudaFree(d_position_x);
cudaFree(d_position_y);
cudaFree(d_velocity_x2);
cudaFree(d_velocity_y2);
cudaFree(d_position_x2);
cudaFree(d_position_y2);
cudaFree(counter);
// Print data
for (int i = 0; i < numOfElements; ++i) {
std::cout << h_position_x[i] << " " << h_position_y[i] << std::endl;
}
// Free memory on host
free(h_position_x);
free(h_position_y);
free(h_velocity_x);
free(h_velocity_y);
return 0;
}
|
5,963 | #include<stdio.h>
#include<stdlib.h>
#include<curand_kernel.h>
#include<curand.h>
#include<sys/time.h>
#include<math.h>
// Default problem parameters; all three are overwritten from argv in
// main().
unsigned int NUM_ITER = 1000000000;
unsigned int NUM_ITERATIONS = 1000;
unsigned int BLOCK_SIZE = 192;
// NOTE(review): this is evaluated once at static initialization using
// the defaults above — verify it is recomputed after argv parsing,
// otherwise the command-line sizes never affect the launch grid.
unsigned int GRID_SIZE = (NUM_ITER/(NUM_ITERATIONS*BLOCK_SIZE));
// Monte-Carlo kernel for estimating pi: each thread draws Niterations
// points (x, y) in the unit square and counts how many land inside the
// quarter circle. Per-thread hit counts go to count[id] — one slot per
// thread, no bounds guard, so the grid must be sized exactly.
__global__ void gpu_random(curandState *states, int Niterations, int *count) {
int id = threadIdx.x + blockDim.x * blockIdx.x;
// Seeding with the thread id gives each thread its own reproducible
// sequence.
int seed = id;
double x,y,z;
count[id] = 0;
curand_init(seed, id, 0, &states[id]);
for(int i=0; i<Niterations; i++)
{
x = curand_uniform(&states[id]);
y = curand_uniform(&states[id]);
// Distance from the origin; inside the unit circle iff z <= 1.
z = sqrt((x*x) + (y*y));
if(z <= 1.0)
count[id] += 1;
}
// NOTE(review): threads share no data here, so this barrier appears
// unnecessary; preserved as written (it is harmless).
__syncthreads();
}
// Host driver: parses <total iterations> <iterations per thread>
// <block size>, launches the Monte-Carlo kernel, sums the per-thread
// hit counts, and prints the pi estimate with the elapsed wall time.
int main(int argc, char *argv[])
{
    if(argc != 4)
    {
        // (message fixed: three arguments are required, not two)
        printf("No. of arguments to be passed should be 3,1st arg as total Num of Iteration, 2nd as num of iteartion per thread, 3rd as BLock Size\n");
        exit(1);
    }
    NUM_ITER = atoi(argv[1]);
    NUM_ITERATIONS = atoi(argv[2]);
    BLOCK_SIZE = atoi(argv[3]);
    // GRID_SIZE was computed at static-init time from the compile-time
    // defaults; recompute it so the command-line values actually drive
    // the launch configuration.
    GRID_SIZE = NUM_ITER / (NUM_ITERATIONS * BLOCK_SIZE);
    struct timeval start_time;
    struct timeval stop_time;
    curandState *dev_random;
    cudaMalloc((void**)&dev_random, BLOCK_SIZE*GRID_SIZE*sizeof(curandState));
    int *countCPU = NULL;
    countCPU = (int*)malloc(BLOCK_SIZE*GRID_SIZE*sizeof(int));
    int *countGPU = NULL;
    cudaMalloc(&countGPU, BLOCK_SIZE*GRID_SIZE*sizeof(int));
    gettimeofday(&start_time, NULL);
    gpu_random<<<GRID_SIZE, BLOCK_SIZE>>>(dev_random, NUM_ITERATIONS, countGPU);
    cudaDeviceSynchronize();
    cudaMemcpy(countCPU, countGPU, BLOCK_SIZE*GRID_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
    // Accumulate per-thread hit counts. Must start from zero — the
    // original read an uninitialized local here. long long avoids
    // overflow when NUM_ITER approaches or exceeds 2^31.
    long long finalCount = 0;
    for(int i=0; i<(BLOCK_SIZE*GRID_SIZE); i++)
        finalCount += countCPU[i];
    double pi;
    pi = ((double)finalCount / (double)NUM_ITER) * 4.0;
    gettimeofday(&stop_time, NULL);
    printf("The result of PI is %lf\n",pi);
    printf("Total time of Execution to calculate PI using GPU is: %ld usec\n\n",
        (stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec));
    cudaFree(dev_random);
    cudaFree(countGPU);
    free(countCPU);
    return 0;
}
|
5,964 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
// Final unrolled reduction stages for a single warp (indices < 32).
// `volatile` stops the compiler from caching sdata elements in
// registers between the dependent steps. The blockSize checks are
// compile-time constants, so unused lines are elided.
// NOTE(review): this classic warp-synchronous pattern assumes implicit
// lockstep within a warp; on Volta+ (independent thread scheduling) it
// needs __syncwarp() between steps — confirm the target architecture.
template<unsigned int blockSize>
__device__ void warpReduce(volatile float *sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Sum reduction (Harris "reduce6" style): each block loads and adds
// two elements per thread into shared memory, reduces the block's
// partial sums with fully unrolled, compile-time-selected stages, and
// writes one partial sum per block to g_odata[blockIdx.x].
// Requires: blockDim.x == blockSize, dynamic shared memory of
// blockSize * sizeof(float), and g_idata covering
// gridDim.x * blockSize * 2 elements.
// Fixes vs the original: the unrolled stages were wrongly nested in a
// for-loop (repeatedly re-adding partial sums), and the result was
// never written to g_odata at all.
template<unsigned int blockSize>
__global__ void reduce6(float *g_idata, float *g_odata) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    // First add happens during the load: two inputs per thread.
    sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
    __syncthreads();
    // Unrolled tree reduction; each stage halves the active threads.
    if (blockSize >= 512) {
        if (tid < 256) { sdata[tid] += sdata[tid + 256]; }
        __syncthreads();
    }
    if (blockSize >= 256) {
        if (tid < 128) { sdata[tid] += sdata[tid + 128]; }
        __syncthreads();
    }
    if (blockSize >= 128) {
        if (tid < 64) { sdata[tid] += sdata[tid + 64]; }
        __syncthreads();
    }
    // Last 32 partial sums reduced warp-synchronously.
    if (tid < 32) warpReduce<blockSize>(sdata, tid);
    // Publish this block's partial sum.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Benchmarks the reduce6 kernel on 100M random floats in [0, 1].
// Note: with this fixed configuration the kernel reduces the first
// dimGrid * dimBlock * 2 elements into dimGrid per-block partial sums.
int main(void) {
    int N = 100000000;
    // Launch configuration: each block of dimBlock threads reduces
    // 2 * dimBlock input elements into one partial sum.
    auto dimGrid = 512;
    auto dimBlock = 512;
    auto threads = 512;
    // One float of shared memory per thread — the kernel writes
    // sdata[tid] for every tid < blockDim.x. (The original passed only
    // 128 floats for 512-thread blocks: an out-of-bounds shared write.)
    auto smemSize = dimBlock * sizeof(float);
    float *g_indata_host, *g_indata_device, *g_outdata_host, *g_outdata_device;
    g_indata_host = (float *) malloc(N * sizeof(float));
    // One partial sum per block (the original allocated a single float
    // while the kernel produces dimGrid results).
    g_outdata_host = (float *) malloc(dimGrid * sizeof(float));
    cudaMalloc(&g_indata_device, N * sizeof(float));
    cudaMalloc(&g_outdata_device, dimGrid * sizeof(float));
    for (auto i = 0; i < N; i++) {
        g_indata_host[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    }
    cudaMemcpy(g_indata_device, g_indata_host, N * sizeof(float), cudaMemcpyHostToDevice);
    // This is where the code is run; dispatch picks the template
    // instantiation matching the runtime thread count.
    auto start = high_resolution_clock::now();
    switch (threads) {
    case 512:
        reduce6<512><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 256:
        reduce6<256><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 128:
        reduce6<128><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 64:
        reduce6<64><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 32:
        reduce6<32><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 16:
        reduce6<16><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 8:
        reduce6<8><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 4:
        reduce6<4><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 2:
        reduce6<2><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    case 1:
        reduce6<1><<<dimGrid, dimBlock, smemSize>>>(g_indata_device, g_outdata_device);
        break;
    }
    // Kernel launches are asynchronous: wait so the timer measures
    // execution time, not just the launch overhead.
    cudaDeviceSynchronize();
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    std::cout << "Time taken by function: "
              << duration.count() << " microseconds" << std::endl;
    cudaFree(g_indata_device);
    cudaFree(g_outdata_device);
    free(g_indata_host);
    free(g_outdata_host);
} |
5,965 | #include <cuda.h>
#include <cstdio>
#include <cstdlib>
// Prints a greeting from every thread whose global id is below
// n_to_print; ids past the limit return silently (the grid may be
// padded to a block-size multiple).
__global__ void kernel(size_t n_to_print) {
    size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n_to_print) {
        return;
    }
    printf("Hello from thread %lu!\n", gid);
}
// Launches enough 256-thread blocks to cover 1000 logical threads and
// waits for all of their printf output to complete.
int main(int argc, char** argv) {
    size_t grid_size = 1000;
    size_t block_size = 256;
    dim3 block(block_size);
    // Round up so every one of the grid_size ids is covered.
    size_t num_blocks = (grid_size + block_size - 1) / block_size;
    dim3 grid(num_blocks);
    kernel<<<grid, block>>>(grid_size);
    // Device printf output is only guaranteed visible after a sync.
    cudaDeviceSynchronize();
    return 0;
}
|
5,966 | #include <stdio.h>
// Queries the current CUDA device, then asks the runtime for the
// device closest to compute capability 1.3 and selects it.
int main(void) {
    // Value-initialization zeroes every field, so only the fields set
    // below act as selection criteria. (This replaces the original
    // memset(), which relied on <string.h> that this file never
    // includes.)
    cudaDeviceProp prop = cudaDeviceProp();
    int dev;
    cudaGetDevice (&dev);
    printf ("ID of current CUDA device: %d\n", dev);
    // Ask for the device best matching compute capability 1.3.
    prop.major = 1;
    prop.minor = 3;
    cudaChooseDevice (&dev, &prop);
    printf ("ID of CUDA device closest to revision 1.3: %d\n", dev);
    cudaSetDevice (dev);
    return 0;
}
|
5,967 | #include "includes.h"
// Ripple animation kernel: one thread per pixel of a DIM x DIM RGBA
// buffer (DIM comes from includes.h). Writes a grey value that
// oscillates with distance from the image centre and with `ticks`.
// Expects a 2D launch covering the full image.
__global__ void kernel(unsigned char *ptr, int ticks) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Flattened pixel index into the RGBA buffer.
    int offset = x + y*blockDim.x*gridDim.x;
    // Distance from the image centre.
    float fx = x - DIM / 2;
    float fy = y - DIM / 2;
    float d = sqrtf(fx*fx + fy*fy);
    // cosf keeps the computation in single precision; the original's
    // cos() silently promoted the float argument to double.
    unsigned char grey = (unsigned char)(128.0f + 127.0f * cosf(d / 10.0f - ticks / 7.0f) / (d / 10.0f + 1.0f));
    ptr[offset * 4 + 0] = grey;
    ptr[offset * 4 + 1] = grey;
    ptr[offset * 4 + 2] = grey;
    ptr[offset * 4 + 3] = 255;
} |
5,968 | #include <stdio.h>
// TMC Faster
// Maps ASCII 'A','C','G','T' (offset by 'A'==65) to 2-bit codes; -1
// marks invalid letters.
__constant__ int mapping[20] = {0, -1, 3, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1};
// Thread i will score genome[i*seqlength] to genome[i*seqlength+(seqlength-1)]
// One block per sequence (blockIdx.x), one thread per k-mer window
// (threadIdx.x). Each thread looks up its window's model score into
// shared memory, then the block sums the scores with a tree reduction
// and thread 0 writes the total to scores[blockIdx.x].
// NOTE(review): requires dynamic shared memory of at least
// (seqLength - order + 2) floats; windows containing 'N' score 0.
__global__ void scoreReads(char* genome, int seqLength, int order, float* model, float* scores) {
int i = blockIdx.x; // Thread identifier, assign to i
int j = threadIdx.x;
// Keep scores in shared memory
extern __shared__ float kmer_scores[]; // Call this with [lengths[i] / order + 1];
//if (i ==0) printf("%s\n", genome);
// Start spot
int seqspot = i*seqLength;
int startspot, stopspot;
// Thread 0 scores the (order-1)-length prefix window; every other
// thread j scores the order-length window starting at j-1.
if (j == 0) {
startspot = seqspot;
stopspot = startspot+(order-1);
}
else{
startspot = seqspot+(j-1);
stopspot = startspot+order;
}
//printf("Block %d Thread %d Startspot %d Stopspot %d\n", i, j, startspot, stopspot);
// Quick loop, check for n's
// Actually, decided to inline it rather than loop twice.
int a;
bool nFlag = false;
int mapVal = 0;
// Build the window's base-4 index while scanning for 'N'.
for (a = startspot; a < stopspot; a++) {
//if (j == 0 && i == 0) printf("%d %d\n", startspot, a);
//if (j == 0 && i == 0) printf("%c\n", genome[a]);
if (genome[a] == 'N') {
//if (i == 0) printf("FOUND N, BREAKING.\n");
kmer_scores[j] = 0;
nFlag = true;
break;
}
else
mapVal = 4*mapVal + mapping[(int)genome[a]-65];
}
if (!nFlag) {
// Thread 0's shorter prefix window lives in a separate region of
// the model table, offset by 4^9.
if (j == 0) {mapVal += pow(4.0, 9.0);}
kmer_scores[j] = model[mapVal]; // Illegal here
//if (i == 0) printf("Thread: %d Mapval: %d Score: %f\n", j, mapVal, kmer_scores[j]);
}
// All window scores must be in shared memory before the reduction.
__syncthreads();
/////// TMC TAKE OUT LATER
/*if (j == 0) {
int m;
float tmpscore=0;
for (m = seqspot; m < seqspot+seqLength-order+1+1; m++) {
if (i == 0) printf("%d: Score: %f New Partial Score: %f\n", m+8, kmer_scores[m-seqspot], tmpscore);
tmpscore += kmer_scores[m-seqspot];
}
if (i == 0) printf("The score for sequence %d should be: %f\n", i, tmpscore);
}
__syncthreads();*/
//////////////////////////
// Do the addition in parallel as well.
//if (j == 0 && i == 0) printf("Number: %d", seqLength-order+1);
// Odd element count: fold the last score into slot 0 first.
if ((j == 0) && (((seqLength-order+1+1) % 2 == 1))) {/*if (i == 0) printf("Thread %d adding %d (%f) and %d (%f)", j, j, kmer_scores[j], seqLength-order+1, kmer_scores[seqLength-order+1]);*/ kmer_scores[j] = kmer_scores[j] + kmer_scores[seqLength-order+1]; /*if (i == 0) printf(" to get: %f\n", kmer_scores[j]);*/ }
// Halving tree reduction over the remaining scores.
int k = (seqLength-order+1+1)/2;
while (k >= 1) {
//if (i == 0 && j == 0) printf("k is %d\n", k);
if (j < k) {
//if (i == 0) printf("k: %d Thread %d adding %d (%f) and %d (%f) to get: %f\n", k, j, j, kmer_scores[j], j+k, kmer_scores[j+k], kmer_scores[j]+kmer_scores[j+k]);
kmer_scores[j] = kmer_scores[j] + kmer_scores[j+k]; // Illegal here
}
__syncthreads();
if (k != 1 && k % 2 != 0 && j == 0) {/*printf("k: %d Thread %d adding %d (%f) and %d (%f) to get: %f\n", k, j, j, kmer_scores[j], k-1, kmer_scores[k-1], kmer_scores[j]+kmer_scores[k-1]);*/ kmer_scores[j] = kmer_scores[j] + kmer_scores[k-1]; }// Uneven split, would be left out otherwise.
k /= 2;
}
// The first kmer_score is now the final.
if (j == 0) {
scores[i] = kmer_scores[0];
//printf("Kernel score for sequence %d: %f\n", i, scores[i]);
}
}
|
5,969 | #include <iostream>
#include <fstream>
#include <chrono>
#include <iomanip>
#include <math.h>
#include <stdint.h>
#include <float.h>
#include <limits.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// One 24-bit RGB output pixel of the rendered image.
struct Pixel {
unsigned char r, g, b;
};
// Minimal 3-component float vector for device code.
struct Vec {
float x,y,z;
// Broadcast constructor: all three components set to v.
__forceinline__ __device__ Vec(float v = 0) {x = y = z = v;}
__forceinline__ __device__ Vec(float a, float b, float c = 0) {x = a;y = b;z = c;}
__forceinline__ __device__ Vec operator+(const Vec r) const { return Vec(x + r.x , y + r.y , z + r.z); }
// Component-wise (Hadamard) product — not a cross product.
__forceinline__ __device__ Vec operator*(const Vec r) const { return Vec(x * r.x , y * r.y , z * r.z); }
// Dot product.
__forceinline__ __device__ float operator%(const Vec r) const {return x * r.x + y * r.y + z * r.z;}
// Normalization. NOTE(review): the 1.0 literal makes this reciprocal
// a double divide before converting back to float.
__forceinline__ __device__ Vec operator!() { return *this * (1.0/sqrtf(*this % *this)); }
};
// Cheap per-thread LCG; advances the state x in place and returns a
// pseudo-random float. NOTE(review): the numerator is 15 bits (max
// 32767) and the divisor is 66635, so values lie in roughly [0, 0.49]
// rather than [0, 1) — 66635 looks like a typo for 65535, but it is
// preserved because the rendered image depends on it.
__forceinline__ __device__
float randomVal(unsigned int& x) {
x = (214013*x+2531011);
return ((x>>16)&0x7FFF) / (float)66635;
}
// Rectangle CSG equation. Returns minimum signed distance from
// space carved by
// lowerLeft vertex and opposite rectangle vertex upperRight.
// Negative inside the box, positive outside (axis-aligned distance).
__forceinline__ __device__
float BoxTest(const Vec& position, Vec lowerLeft, Vec upperRight) {
// Reuse the parameters as per-axis offsets to the two corners.
lowerLeft = position + lowerLeft * -1.0f;
upperRight = upperRight + position * -1.0f;
return -fminf(fminf(fminf(lowerLeft.x, upperRight.x),
fminf(lowerLeft.y, upperRight.y)),
fminf(lowerLeft.z, upperRight.z));
}
#define HIT_NONE 0
#define HIT_LETTER 1
#define HIT_WALL 2
#define HIT_SUN 3
// Sample the world using Signed Distance Fields.
// Returns the distance from `position` to the nearest surface and
// reports which surface class it was via hitType.
__forceinline__ __device__
float QueryDatabase(const Vec& position, int &hitType) {
float distance = 1e9;//FLT_MAX;
Vec f = position; // Flattened position (z=0)
f.z = 0;
// Letter strokes as 2D line segments, 4 floats each:
// (begin.x, begin.y, end.x, end.y).
const float lines[10*4] = {
-20.0f, 0.0f, -20.0f, 16.0f,
-20.0f, 0.0f, -14.0f, 0.0f,
-11.0f, 0.0f, -7.0f, 16.0f,
-3.0f, 0.0f, -7.0f, 16.0f,
-5.5f, 5.0f, -9.5f, 5.0f,
0.0f, 0.0f, 0.0f, 16.0f,
6.0f, 0.0f, 6.0f, 16.0f,
0.0f, 16.0f, 6.0f, 0.0f,
9.0f, 0.0f, 9.0f, 16.0f,
9.0f, 0.0f, 15.0f, 0.0f
};
// NOTE(review): the stride is sizeof(float) == 4 array slots, which
// only matches one segment per iteration because each segment is
// exactly 4 floats — fragile but correct here.
for (unsigned i = 0; i < sizeof(lines)/sizeof(float); i += sizeof(float)) {
Vec begin = Vec(lines[i], lines[i + 1]) * 0.5f;
Vec e = Vec(lines[i + 2], lines[i + 3]) * 0.5f + begin * -1.0f;
// Closest point on the segment to f, via clamped projection.
Vec o = f + (begin + e * fminf(-fminf((((begin + f * -1) % e )/(e % e)), 0),1)) * -1.0f;
distance = fminf(distance, o % o); // compare squared distance.
}
distance = sqrtf(distance); // Get real distance, not square distance.
// Inflate the 2D strokes into rounded 3D tubes (p-norm blend of the
// planar distance with the z offset, minus the stroke radius).
distance = powf(powf(distance, 8)+powf(position.z, 8), 0.125f) - 0.5f;
hitType = HIT_LETTER;
float roomDist ;
roomDist = fminf(-fminf(
BoxTest(position, Vec(-30.0f, -0.5f, -30.0f), Vec(30.0f, 18.0f, 30.0f)),
BoxTest(position, Vec(-25.0f, 17.0f, -25.0f), Vec(25.0f, 20.0f, 25.0f))),
BoxTest( // Ceiling "planks" spaced 8 units apart.
Vec(fmodf(fabsf(position.x), 8.0f), position.y, position.z),
Vec(1.5f, 18.5f, -25.0f),
Vec(6.5f, 20.0f, 25.0f)));
if (roomDist < distance) {
distance = roomDist;
hitType = HIT_WALL;
}
float sun = 19.9f - position.y; // Everything above 19.9 is light source.
if (sun < distance) {
distance = sun;
hitType = HIT_SUN;
}
return distance;
}
// Perform signed sphere marching
// Returns hitType 0, 1, 2, or 3 and update hit position/normal
// Marches from `origin` along `direction`, stepping by the SDF value
// until within 0.01 of a surface or out of budget.
__forceinline__ __device__
int RayMarching(const Vec& origin, const Vec& direction, Vec& hitPos, Vec& hitNorm) {
int hitType = HIT_NONE;
int noHitCount = 0;
// Signed distance marching
float d; // distance from closest object in world.
for (float total_d = 0.0f; total_d < 100.0f; total_d += d) {
d = QueryDatabase(hitPos = origin + direction * total_d, hitType);
if (d < .01f || ++noHitCount > 99) {
// Surface normal via central-difference gradient of the SDF.
// noHitCount is reused here only as a scratch hitType output.
hitNorm = !Vec(QueryDatabase(hitPos + Vec(0.01f, 0.00f), noHitCount) - d,
QueryDatabase(hitPos + Vec(0.00f, 0.01f), noHitCount) - d,
QueryDatabase(hitPos + Vec(0.00f, 0.00f, 0.01f), noHitCount) - d);
return hitType;
}
}
return HIT_NONE;
}
// Traces one path from `origin` along `direction` for up to 8 bounces,
// accumulating color. Letters bounce specularly, walls bounce
// diffusely (cosine-weighted hemisphere sample) with a direct-light
// shadow test toward the sun, and hitting the sun terminates the path.
// rn is the per-path RNG state.
__forceinline__ __device__
Vec Trace(Vec origin, Vec direction, unsigned int& rn) {
Vec sampledPosition;
Vec normal;
Vec color(0.0f, 0.0f, 0.0f);
Vec attenuation(1.0f);
Vec lightDirection(!Vec(0.6f, 0.6f, 1.0f)); // Directional light
for (int bounceCount = 8; bounceCount--;) {
int hitType = RayMarching(origin, direction, sampledPosition, normal);
if (hitType == HIT_NONE)
break; // No hit. This is over, return color.
else if (hitType == HIT_LETTER) { // Specular bounce on a letter. No color acc.
// Mirror reflection about the surface normal.
direction = direction + normal * (normal % direction * -2);
origin = sampledPosition + direction * 0.1f;
attenuation = attenuation * 0.2f; // Attenuation via distance traveled.
} else if (hitType == HIT_WALL) { // Wall hit uses color yellow?
float incidence = normal % lightDirection;
// Sample a cosine-weighted direction in the hemisphere around
// the normal, using an orthonormal basis built from it.
float p = 6.283185f * randomVal(rn);
float c = randomVal(rn);
float s = sqrtf(1.0f - c);
float g = normal.z < 0.0f ? -1.0f : 1.0f;
float u = (-1.0f / (g + normal.z));
float v = normal.x * normal.y * u;
float cosp;
float sinp;
sinp = sinf(p);
cosp = cosf(p);
//sincosf(p, &sinp, &cosp);
direction = Vec(v, g + normal.y * normal.y * u, -normal.y) * (cosp * s) +
Vec(1 + g * normal.x * normal.x * u, g * v, -g * normal.x) *
(sinp * s) + normal * sqrtf(c);
origin = sampledPosition + direction * 0.1f;
attenuation = attenuation * 0.2f;
// Direct lighting: only add the sun's contribution when the
// shadow ray actually reaches the light.
if (incidence > 0.0f &&
RayMarching(sampledPosition + normal * 0.1f, lightDirection,
sampledPosition, normal) == HIT_SUN)
color = color + attenuation * Vec(500, 400, 100) * incidence;
}
else if (hitType == HIT_SUN) { //
color = color + attenuation * Vec(50, 80, 100);
break; // Sun Color
}
}
return color;
}
// One thread per pixel (1D launch over totalPixels, bounds-checked).
// Accumulates sampleCount jittered camera rays through Trace(), then
// applies Reinhard tone mapping and writes the 8-bit RGB result.
__global__
void PathTracer(int sampleCount, Pixel *img, int totalPixels,
unsigned imgWidth, unsigned imgHeight) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < totalPixels) {
int x = index % imgWidth;
int y = index / imgWidth;
// Camera: position, normalized view direction, and screen basis.
const Vec position(-12.0f, 5.0f, 25.0f);
const Vec goal = !(Vec(-3.0f, 4.0f, 0.0f) + position * -1.0f);
const Vec left = !Vec(goal.z, 0, -goal.x) * (1.0f / imgWidth);
// Cross-product to get the up vector
const Vec up(goal.y *left.z - goal.z * left.y,
goal.z *left.x - goal.x * left.z,
goal.x *left.y - goal.y * left.x);
Vec color;
// RNG state v is seeded from the pixel index so each pixel gets an
// independent deterministic sequence.
for (unsigned int p = sampleCount, v = index; p--;) {
Vec rand_left = Vec(randomVal(v), randomVal(v), randomVal(v))*.001;
float xf = x + randomVal(v);
float yf = y + randomVal(v);
color = color + Trace(position, !((goal+rand_left) + left *
((xf - imgWidth / 2.0f) + randomVal(v)) + up *
((yf - imgHeight / 2.0f) + randomVal(v))), v);
}
// Reinhard tone mapping
color = color * (1.0f / sampleCount) + 14.0f / 241.0f;
Vec o = color + 1.0f;
color = Vec(color.x / o.x, color.y / o.y, color.z / o.z) * 255.0f;
img[index].r = (unsigned char)color.x;
img[index].g = (unsigned char)color.y;
img[index].b = (unsigned char)color.z;
}
}
// Benchmark driver: parses [#samples] [img-width img-height], renders
// the scene with PathTracer into managed memory, reports throughput,
// and writes the image as a binary PPM.
int main(int argc, char **argv) {
    using namespace std;
    unsigned int sampleCount = 1 << 7;
    unsigned int imageWidth = 1280;
    unsigned int imageHeight = 1024;
    if (argc > 1) {
        if (argc == 2)
            sampleCount = atoi(argv[1]);
        else if (argc == 4) {
            imageWidth = atoi(argv[2]);
            sampleCount = atoi(argv[1]);
            imageHeight = atoi(argv[3]);
        } else {
            cout << "usage: raytracer [#samples] [img-width img-height]\n";
            return 1;
        }
    }
    cout << "\n";
    cout << "---- Raytracer benchmark (cuda) ----\n"
         << " Image size : " << imageWidth << "x" << imageHeight << "\n"
         << " Samples/pixel : " << sampleCount << "\n\n";
    cout << " Allocating image..." << std::flush;
    cudaError_t err = cudaSuccess;
    // Managed memory: written by the kernel, read by the host below.
    Pixel *img;
    size_t totalPixels = imageWidth * imageHeight;
    err = cudaMallocManaged(&img, totalPixels * sizeof(Pixel));
    if (err != cudaSuccess) {
        fprintf(stderr, "failed to allocate managed memory!\n");
        return 1;
    }
    cout << " done.\n\n";
    cout << " Starting benchmark..." << std::flush;
    auto start_time = chrono::steady_clock::now();
    int threadsPerBlock = 256;
    int blocksPerGrid = (totalPixels + threadsPerBlock - 1) / threadsPerBlock;
    PathTracer<<<blocksPerGrid, threadsPerBlock>>>(sampleCount, img, totalPixels,
                                                   imageWidth, imageHeight);
    // Required before the host touches img, and so the timing below
    // measures execution rather than just the launch.
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        // (message fixed: the original named an unrelated "vectorAdd" kernel)
        fprintf(stderr, "Failed to launch PathTracer kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    auto end_time = chrono::steady_clock::now();
    double elapsed_time = chrono::duration<double>(end_time-start_time).count();
    cout << "\n\n Total time: " << elapsed_time << " seconds.\n";
    cout << " Pixels/second: " << totalPixels / elapsed_time << ".\n\n";
    cout << " Saving image..." << std::flush;
    std::ofstream img_file;
    img_file.open ("raytrace-cuda.ppm");
    img_file << "P6 " << imageWidth << " " << imageHeight << " 255 ";
    // Pixels are written back-to-front, matching the original output.
    for(int i = totalPixels-1; i >= 0; i--)
        img_file << img[i].r << img[i].g << img[i].b;
    img_file.close();
    // Release the managed image buffer (the original leaked it).
    cudaFree(img);
    cout << " done.\n\n"
         << "*** " << elapsed_time << ", " << elapsed_time << "\n"
         << "----\n\n";
    return 0;
}
|
5,970 | #include<stdio.h>
#include<iostream>
#include <stdlib.h>
#include <algorithm>
#define MAX_BLOCK_DIM_SIZE 65535
using namespace std;
// Block-level sum reduction (interleaved addressing). Each block sums
// blockDim.x inputs in shared memory and writes its partial sum to
// g_odata[blockIdx.x]. Requires dynamic shared memory of
// blockDim.x * sizeof(int), and a grid that exactly tiles the input —
// there is no bounds guard on g_idata[i].
// NOTE(review): num_bytes is never used, and the tid % (2*s) pairing
// diverges within warps; preserved as written.
__global__ void reduce(int *g_idata, int *g_odata, int num_bytes) {
// create shared memory array
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
// sync threads
__syncthreads();
// do reduction in shared mem
// s = 1,2,4,8,...
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// if tid is multiple of 2,4,8,....
if (tid % (2*s) == 0) {
// ie: sdata[4] += sdata[4+2]
// sdata[6] += sdata[6+2]
sdata[tid] += sdata[tid + s];
}
// Barrier is outside the if — every thread must reach it.
__syncthreads();
}
// now they are all added up...
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the given reduction kernel
// For the kernels >= 3, we set threads / block to the minimum of maxThreads and
// n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel
// 6, we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
////////////////////////////////////////////////////////////////////////////////
// Round x up to the next power of two (x is returned unchanged when it
// is already a power of two; 0 maps to 0). Works by smearing the top
// set bit into every lower position, then adding one.
unsigned int nextPow2( unsigned int x ) {
    x -= 1;
    for (unsigned int shift = 1; shift <= 16; shift <<= 1)
        x |= x >> shift;
    return x + 1;
}
#define MIN(x,y) ((x < y) ? x : y)
// Pick a launch configuration (blocks, threads) for reduction kernel
// number `whichKernel` over n elements. Kernels >= 3 load two elements
// per thread on the first pass, so the thread count targets n/2;
// earlier kernels load one element per thread. Kernel 6 processes a
// variable number of elements per thread, so its block count is capped
// at maxBlocks.
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
    if (whichKernel >= 3)
    {
        // Two inputs per thread: size threads from ceil(n/2).
        int halfN = (n + 1) / 2;
        threads = (n < maxThreads * 2) ? nextPow2(halfN) : maxThreads;
        blocks = (n + threads * 2 - 1) / (threads * 2);
    }
    else
    {
        threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
        blocks = (n + threads - 1) / threads;
    }
    if (whichKernel == 6)
        blocks = MIN(maxBlocks, blocks);
}
// Host driver: fills an array with random ints, reduces it on the GPU
// (one partial sum per 256-element block), and compares the first
// partial against the CPU reference total.
int main()
{
    // Number of values to be added together.
    int Nvals = 1<<18;
    int maxThreads = 256;
    // Cap imposed by the maximum 1D grid dimension (kept for parity
    // with getNumBlocksAndThreads-style sizing).
    int maxBlocks = MIN( Nvals / maxThreads, MAX_BLOCK_DIM_SIZE);
    (void)maxBlocks;
    int num_bytes = Nvals*sizeof(int);
    // Allocate memory on host (CPU)
    int* h_idata = (int*)malloc(num_bytes);
    int* h_odata = (int*)malloc(num_bytes);
    int* d_idata = 0;
    int* d_odata = 0;
    // Allocate memory on device (GPU)
    cudaMalloc((void**)&d_idata, num_bytes);
    cudaMalloc((void**)&d_odata, num_bytes);
    // cudaMalloc leaves the pointer untouched on failure, which is why
    // they start at 0. Check every allocation — the original only
    // tested h_idata and d_odata.
    if (0 == h_idata || 0 == h_odata || 0 == d_idata || 0 == d_odata)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }
    // Zero the device buffers.
    cudaMemset(d_idata, 0, num_bytes);
    cudaMemset(d_odata, 0, num_bytes);
    // Random input plus a CPU reference sum for verification.
    int sum = 0;
    for (int i = 0; i < Nvals; i++) {
        h_idata[i] = (int)(rand() % 100);
        sum += h_idata[i];
    }
    cudaMemcpy(d_idata, h_idata, num_bytes, cudaMemcpyHostToDevice);
    // One 256-thread block per 256 inputs; each block emits one
    // partial sum.
    dim3 grid, block;
    block.x = 256;
    grid.x = Nvals / block.x;
    // The kernel touches sdata[tid] for every tid < blockDim.x, so it
    // needs one int of shared memory per thread. (The original sized
    // this from a stale `threads` variable — 128 ints for 256-thread
    // blocks, an out-of-bounds shared-memory write.)
    int smemSize = block.x * sizeof(int);
    cout << grid.x << " Blocks, " << block.x << " Threads" << endl;
    reduce<<< grid, block, smemSize >>>(d_idata, d_odata, num_bytes);
    // Blocking copy — also synchronizes with the kernel above.
    cudaMemcpy(h_odata, d_odata, num_bytes, cudaMemcpyDeviceToHost);
    // Only the first Nvals / block.x entries hold partial sums.
    int numPartials = Nvals / block.x;
    for (int i = 0; i < numPartials; i++) {
        if (h_odata[i] == 0) continue;
        else { std::cout << i << " " << h_odata[i] << std::endl; }
    }
    printf("%d \n", h_odata[0]);
    printf("Should be %d \n", sum);
    free(h_idata);
    free(h_odata);
    cudaFree(d_idata);
    cudaFree(d_odata);
    return 0;
}
|
5,971 | #include "includes.h"
// Per-pixel Euclidean norm of a 3-component vector field stored as three
// consecutive planes of npix floats: N[0..npix) = component 0,
// N[npix..2*npix) = component 1, N[2*npix..3*npix) = component 2.
// The result is clamped below by 1e-10f so callers can divide by it safely.
// Launch with a 1D grid covering at least npix threads.
__global__ void norm_components(float* N, int npix, float* norm) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < npix) {
        float x = N[i];
        float y = N[npix + i];
        float z = N[npix * 2 + i];
        // 1e-10f: float literal -- the original passed a double constant to
        // fmaxf, forcing an implicit narrowing conversion at the call.
        norm[i] = fmaxf(1e-10f, sqrtf(x * x + y * y + z * z));
    }
}
5,972 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#define THREADS 16
#define BLOCKS 2
// Each thread replaces its element with the sum of itself and its two ring
// neighbours over the full THREADS*BLOCKS array. Launch exactly as
// add<<<BLOCKS, THREADS>>>.
__global__ void add(int *array) {
    const int n   = THREADS * BLOCKS;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the original computed (idx - 1) % n, which is -1 for idx == 0
    // in C/C++ (remainder keeps the sign of the dividend) and therefore read
    // array[-1]. Bias by +n before taking the remainder so both neighbour
    // indices stay in [0, n).
    const int next = (idx + 1) % n;
    const int prev = (idx - 1 + n) % n;
    int temp = array[idx];
    temp += array[next];
    temp += array[prev];
    // Order all reads before any write within the block.
    // NOTE(review): __syncthreads() only synchronizes one block; reads of
    // neighbours owned by the other block still race with that block's
    // writes. With the all-ones input this program uses, the sum is the
    // same either way, but general inputs would need double buffering.
    __syncthreads(); //evita condición de carrera... (avoids a race condition)
    array[blockIdx.x * blockDim.x + threadIdx.x] = temp;
}
// Fill the whole host vector (THREADS * BLOCKS entries) with the value
// `numb`.
void init(int* h_v, int numb) {
    const int count = THREADS * BLOCKS;
    for (int i = 0; i < count; ++i)
        h_v[i] = numb;
}
// Host driver: builds an all-ones vector, runs the ring-sum kernel once,
// and prints every result to stderr. Returns nonzero on allocation failure.
int main( void ) {
    int *result, *h_a;
    int *dev_a;
    int size = THREADS * BLOCKS * sizeof(int);
    result = (int*) malloc( size );
    h_a = (int*) malloc( size );
    // Robustness fix: the original never checked any allocation.
    if (result == NULL || h_a == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    memset(result, 0, size);
    memset(h_a, 0, size);
    init(h_a, 1);
    if (cudaMalloc(&dev_a, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(h_a), free(result);
        return 1;
    }
    // Copy the input data to device memory.
    cudaMemcpy(dev_a, h_a, size, cudaMemcpyHostToDevice);
    add<<<BLOCKS, THREADS>>>(dev_a);
    // Surface launch/execution errors before reading results back.
    cudaDeviceSynchronize();
    // Copy the results from the device back to host memory.
    cudaMemcpy(result, dev_a, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < THREADS * BLOCKS; i++) {
        fprintf(stderr, "Result : %d\n", result[i]);
    }
    free(h_a), free(result);
    cudaFree(dev_a);
    return 0;
}
|
5,973 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
//__global__ void hello_kernel()
//{
// printf("Hello cuda world \n");
//}
//int main()
//{
// printf("hello from main \n");
//
// dim3 block();
//
// hello_kernel <<< 1, 1 >>> ();
//
// cudaDeviceSynchronize();
// cudaDeviceReset();
// return 0;
//} |
5,974 | extern "C"
extern "C"
// Buddhabrot-style accumulation: for every sample point c = (cR, cI) whose
// Mandelbrot orbit escapes within `iterations`, replay the orbit and bump a
// counter for each pixel it visits. One thread handles one sample; `output`
// is a size x size image flattened row-major.
// NOTE(review): there is no guard on `i` against the input length --
// presumably the launch exactly covers the sample arrays; confirm at the
// call site.
__global__ void exec(int iterations, int size,
        float* inputR, float* inputI, // Real/Imaginary input
        int* output  // Output image in one dimension
) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float cR = inputR[i];
    float cI = inputI[i];
    // Cardioid / period-2 bulb test: points inside the main body can never
    // escape, so skip them without iterating.
    float q = ((cR - (1.0 / 4.0)) * (cR - (1.0 / 4.0))) + (cI * cI);
    if (q * (q + (cR - (1.0 / 4.0))) < (1.0 / 4.0) * (cI * cI)
            || (cR + 1.0) * (cR + 1.0) + (cI * cI) < (1.0 / 16.0))
        return;
    float x = 0;
    float y = 0;
    float xNew = 0;
    float yNew = 0;
    // First pass: find the iteration at which the orbit escapes (|z|^2 > 4).
    int divergeIndex = 0;
    for (int j = 0; j < iterations; j++) {
        xNew = (x * x) - (y * y) + cR;
        yNew = (2 * x * y) + cI;
        if (xNew * xNew + yNew * yNew > 4) {
            divergeIndex = j;
            break;
        }
        x = xNew;
        y = yNew;
    }
    if (divergeIndex == 0) {
        return; // never escaped (or escaped immediately): contributes nothing
    }
    // Second pass: replay the orbit up to the escape point and accumulate.
    x = 0;
    y = 0;
    int curX = 0;
    int curY = 0;
    int idx = 0;
    for (int j = 0; j < divergeIndex; j++) {
        xNew = (x * x) - (y * y) + cR;
        yNew = (2 * x * y) + cI;
        // Map the complex plane [-2, 2] onto the size x size image.
        curX = (xNew + 2 ) * size / 4;
        curY = (yNew + 2 ) * size / 4;
        x = xNew;
        y = yNew;
        // Bug fix: an orbit point exactly on the upper boundary maps to
        // index == size, which indexed one row/column past the image.
        if (curX < 0 || curX >= size || curY < 0 || curY >= size)
            continue;
        idx = curX + size * curY;
        // Bug fix: many threads hit the same pixel concurrently, so the two
        // plain `output[idx]++` statements in the original raced and lost
        // counts. A single atomic add of 2 preserves the intended weight.
        atomicAdd(&output[idx], 2);
    }
}
5,975 |
/*!
* This file provides structure and function definitions for the Vector2 and
* Matrix2x2 types, which are vector and matrix types with fixed dimensions.
* The operations defined for these types compute outputs directly without the
* use of loops. These types are useful for any algorithm that operates on
* pairwise data.
*
* Since CUDA provides built-in vector types, Vector2 and Matrix2x2 are
* defined in terms of these types. The following mapping is used to map
* indices to xyzw:
*
* ELEM(M, 0, 0) = M->x
* ELEM(M, 0, 1) = M->y
* ELEM(M, 1, 0) = M->z
* ELEM(M, 1, 1) = M->w
*/
// Vector2 / Matrix2x2 are thin aliases over CUDA's built-in vector types;
// see the mapping of matrix elements to xyzw in the header comment above.
typedef float2 Vector2;
typedef float4 Matrix2x2;
// All multi-statement macros below are wrapped in do { } while (0).
// Bug fix: the originals expanded to bare statement lists, so a call such
// as `if (cond) vectorAdd(a, b);` silently executed only the first
// statement under the condition. The wrapper makes each macro behave as a
// single statement while still requiring the trailing semicolon.
// a := (0, 0)
#define vectorInitZero(a) \
    do { \
        (a)->x = 0; \
        (a)->y = 0; \
    } while (0)
// a += b
#define vectorAdd(a, b) \
    do { \
        (a)->x += (b)->x; \
        (a)->y += (b)->y; \
    } while (0)
// a += c * b
#define vectorAddScaled(a, c, b) \
    do { \
        (a)->x += (c) * (b)->x; \
        (a)->y += (c) * (b)->y; \
    } while (0)
// a -= b
#define vectorSubtract(a, b) \
    do { \
        (a)->x -= (b)->x; \
        (a)->y -= (b)->y; \
    } while (0)
// a *= c
#define vectorScale(a, c) \
    do { \
        (a)->x *= (c); \
        (a)->y *= (c); \
    } while (0)
// Expression macro: dot product of a and b.
#define vectorDot(a, b) \
    ((a)->x * (b)->x + (a)->y * (b)->y)
#define SQR(x) ((x)*(x))
// Expression macro: Euclidean distance between a and b.
#define vectorDiffNorm(a, b) \
    sqrt(SQR((a)->x - (b)->x) + SQR((a)->y - (b)->y))
// M := I
#define matrixInitIdentity(M) \
    do { \
        (M)->x = 1; \
        (M)->y = 0; \
        (M)->z = 0; \
        (M)->w = 1; \
    } while (0)
// M := 0
#define matrixInitZero(M) \
    do { \
        (M)->x = 0; \
        (M)->y = 0; \
        (M)->z = 0; \
        (M)->w = 0; \
    } while (0)
// A *= c (element-wise)
#define matrixScale(A, c) \
    do { \
        (A)->x *= (c); \
        (A)->y *= (c); \
        (A)->z *= (c); \
        (A)->w *= (c); \
    } while (0)
// B := A^-1, *det := det(A). Caller must ensure det(A) != 0.
#define matrixInverse(A, B, det) \
    do { \
        *det = (A)->x * (A)->w - (A)->y * (A)->z; \
        (B)->x = +(A)->w / (*det); \
        (B)->y = -(A)->y / (*det); \
        (B)->z = -(A)->z / (*det); \
        (B)->w = +(A)->x / (*det); \
    } while (0)
// b := A * x_ (matrix-vector product); b must not alias x_.
#define matrixProduct(A, x_, b) \
    do { \
        (b)->x = (A)->x * (x_)->x + (A)->y * (x_)->y; \
        (b)->y = (A)->z * (x_)->x + (A)->w * (x_)->y; \
    } while (0)
// A += c * (x_ x_^T) (scaled outer product)
#define matrixAddOuterProduct(A, c, x_) \
    do { \
        (A)->x += (c) * (x_)->x * (x_)->x; \
        (A)->y += (c) * (x_)->x * (x_)->y; \
        (A)->z += (c) * (x_)->y * (x_)->x; \
        (A)->w += (c) * (x_)->y * (x_)->y; \
    } while (0)
|
5,976 |
#ifdef __NVCC__
//K in parallel
template < class U >
// Pops the minimum-cost node from each of K independent binary min-heaps.
// Heap `id` occupies PQ[id*ceil(N/K) .. id*ceil(N/K)+PQ_size[id]), keyed by
// the cost array Cx. The popped node is removed from the open list and
// appended (via an atomic counter) to the expansion frontier.
// Launch with at least K threads; one thread services one heap.
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
// Move the last element to the root, then sift it down.
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
// Sift-down loop: pqIndex is relative to `front`; children are at
// 2*pqIndex+1 and 2*pqIndex+2.
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
// Only a left child exists.
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// Both children exist: swap with the smaller violating child.
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
// atomicAdd serializes frontier appends across the K extractor threads.
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
template < class T, class U >
// Expands every node in the current frontier (expandNodes) of a parallel
// A* search over a CSR graph (off = row offsets, edge = adjacency, W =
// edge weights, Hx = heuristic, Cx = f-cost = g + h). Relaxed neighbours
// that are not on the open list are flagged in nVFlag for later insertion.
// A per-node spin lock serializes the Cx/parent update for each child.
__global__ void A_star_expand(int* off,int* edge,T* W, U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
// CSR neighbour range: [off[node], off[node+1]) -- the last row ends at E.
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
// Deleted edges are encoded as negative endpoints.
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
// Relax the edge: Cx[node] - Hx[node] is g(node); add the edge
// weight and the child's heuristic to get a candidate f(child).
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
// Make the Cx write visible before other threads observe parent.
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
// Release the lock (0 = free).
atomicCAS(&lock[child],1,0);
}
// NOTE(review): this __syncthreads() sits inside control flow that is
// divergent both across the `id < *expandNodes_size` guard and across
// lock-acquisition outcomes; per the CUDA programming guide a barrier
// in divergent flow is undefined behavior. It appears intended to
// stagger retries of the spin lock -- confirm and consider
// restructuring (e.g. __syncwarp or a backoff) instead.
__syncthreads();
}
start++;
}
}//end
}
//K in parallel -- O(N)
template < class U >
// Repairs the heap property of each of the K per-thread priority queues
// after external cost updates: scans every node, and when a parent is more
// expensive than a child, bubbles the cheaper child up to the root region.
// One thread per queue; O(N) work per queue in the worst case.
__global__ void keepHeapPQ(unsigned int* PQ, unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
// Both children in range.
// NOTE(review): costs are read into `int`, truncating when U is a
// floating-point type -- confirm U is integral at all call sites.
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
// Bubble up the cheaper of the two children.
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
// Sift-up toward `front` while the parent is more expensive.
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
// Only a left child in range.
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U >
// Distributes the nVsize new vertices round-robin over the K priority
// queues: queue `id` takes entries id, id+K, id+2K, ... of nextV. Each
// accepted vertex is appended to that queue's heap, sifted up by its cost
// Cx, and recorded in openList with its owning queue index.
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
// openList[v] != -1 means v is already owned by some queue.
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
// Append at the end of this queue's heap region.
// NOTE(review): there is no check that PQS[id] stays below the per-queue
// capacity ceil(N/K) -- presumably guaranteed because each vertex is
// inserted at most once; confirm.
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
// Sift the new entry up until the min-heap property holds.
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template < class U >
// Termination test for the parallel A*: one thread per queue inspects its
// heap minimum, and if any queue still holds a node cheaper than the
// current cost of the destination, the search must continue, so the
// shared end flag is cleared atomically.
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
    const int q = blockIdx.x*blockDim.x+threadIdx.x;
    if (q >= K)
        return;
    if (PQ_size[q] == 0)
        return;
    const int base = q * ((N+K-1)/K);
    const int top = PQ[base];
    //check if atleast one min, dont end the a*
    if (Cx[top] < Cx[dest])
        atomicAnd(flagEnd, 0);
}
template <class U>
// Single-thread copy of the destination's cost into a result cell the host
// can read back with one small memcpy.
__global__ void getCx(U* Cx,int dest,U* val){
    const bool isFirst = (blockIdx.x*blockDim.x+threadIdx.x) == 0;
    if (isFirst)
        *val = Cx[dest];
}
#endif
|
5,977 | /*
Based on the hello-world created by Ingemar Ragnemalm 2010
(http://computer-graphics.se/hello-world-for-cuda.html)
and the book "CUDA by Example"
This example code detects CUDA devices, print their information
and tests the parallel programing using CUDA
Author: João Ribeiro
nvcc check-cuda.cu -L /usr/local/cuda/lib -lcudart -o check-cuda
*/
#include <stdio.h>
#include <unistd.h>
const int N = 16;
const int blocksize = 16;
__global__
// Offsets each character of `a` by the matching integer in `b`; thread i
// owns element i. Launched with a single block of N threads.
void hello(char *a, int *b)
{
    const int i = threadIdx.x;
    a[i] = a[i] + b[i];
}
int main()
{
// Enumerates every CUDA device, prints its properties, then runs the tiny
// `hello` kernel to turn "Hello " into "Hello World!" as a smoke test.
char a[N] = "Hello \0\0\0\0\0\0";
// Offsets that map "Hello " onto "World!" when added character-wise.
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char *ad;
int *bd;
int dev_count;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
cudaDeviceProp prop;
// NOTE(review): the return codes of the CUDA API calls below are never
// checked; on a machine without a usable device the kernel silently does
// nothing and "Hello " is printed twice.
cudaGetDeviceCount(&dev_count);
printf("Number of CUDA devices found: %d\n\n", dev_count);
/* Get and print GPU information */
for (int i = 0; i < dev_count; i++) {
cudaGetDeviceProperties(&prop, i);
printf( "--- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap:" );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout :" );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "--- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( "--- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions:(%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
}
/* End of print GPU information */
printf("The next print will be the result of a parallel processed array. If you see the string \"Hello World!\" then CUDA is working!\n\n");
printf("%s", a);
/* Using CUDA to generate the string "World!"*/
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
// The blocking device-to-host copy below also synchronizes with the
// kernel, so no explicit cudaDeviceSynchronize() is needed here.
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
/* End of using CUDA to generate the string "World!"*/
printf("%s\n\n", a);
usleep(1000);
return EXIT_SUCCESS;
}
|
5,978 | #include "includes.h"
// Computes, per block, partial sums of x.y, x.x and y.y over `len`
// elements using a grid-stride accumulation followed by a shared-memory
// tree reduction. The host must finish the reduction over the per-block
// outputs (dot_product, norm_x, norm_y, one entry per block) and combine
// them into the cosine distance.
// NOTE(review): `threadsPerBlock` comes from includes.h and must equal the
// launch's blockDim.x; the halving reduction below additionally assumes
// blockDim.x is a power of two -- confirm both at the launch site.
__global__ void cuda_cosineDistance(double *x, double* y, int64_t len, double *dot_product, double *norm_x, double*norm_y)
{
int64_t idx = threadIdx.x + blockIdx.x * blockDim.x;
int64_t cacheIdx = threadIdx.x;
__shared__ double dot_cache[threadsPerBlock];
__shared__ double norm_x_cache[threadsPerBlock];
__shared__ double norm_y_cache[threadsPerBlock];
// Grid-stride accumulation into per-thread registers; idx advances by the
// total thread count each step, so any grid size covers all `len` elements.
double dot_tmp = 0;
double norm_x_tmp = 0;
double norm_y_tmp = 0;
while(idx < len)
{
dot_tmp += x[idx] * y[idx];
norm_x_tmp += x[idx] * x[idx];
norm_y_tmp += y[idx] * y[idx];
idx += blockDim.x * gridDim.x;
}
dot_cache[cacheIdx] = dot_tmp;
norm_x_cache[cacheIdx] = norm_x_tmp;
norm_y_cache[cacheIdx] = norm_y_tmp;
// All shared-memory writes must land before the tree reduction reads them.
__syncthreads();
// Halving tree reduction: active thread count drops by half each round.
int64_t i = blockDim.x/2;
while(i!=0)
{
if(cacheIdx < i)
{
dot_cache[cacheIdx] += dot_cache[cacheIdx + i];
norm_x_cache[cacheIdx] += norm_x_cache[cacheIdx + i];
norm_y_cache[cacheIdx] += norm_y_cache[cacheIdx + i];
}
__syncthreads();
i/=2;
}
// Thread 0 publishes this block's three partial sums.
if(cacheIdx == 0)
{
dot_product[blockIdx.x] = dot_cache[0];
norm_x[blockIdx.x] = norm_x_cache[0];
norm_y[blockIdx.x] = norm_y_cache[0];
}
}
5,979 | #include "includes.h"
/////////////////////////////////////////////////////////
// Computes the 1-stencil using GPUs.
// We don't check for error here for brevity.
// In your implementation - you must do it!
#define BLOCK_SIZE 1024
#define WARP_SIZE 32
#ifndef k
#define k 3
#endif
#ifndef OUTPUT_PER_THREAD
#define OUTPUT_PER_THREAD 1
#endif
#define LOCAL_REGISTER_SIZE ((1+OUTPUT_PER_THREAD) > (k+31)/32 ? (1+OUTPUT_PER_THREAD) : (k+31)/32)
#ifndef TEST_TIMES
#define TEST_TIMES 5
#endif
float host_k_stencil (int *A, int *B, int sizeOfA, int withRc);
// (k+1)-point forward stencil: B[i] = A[i] + A[i+1] + ... + A[i+k], with
// each thread producing OUTPUT_PER_THREAD outputs. The block first stages
// its input window (plus a k-element halo) into dynamic shared memory.
// Launch requirements (from the indexing below): blockDim.x == BLOCK_SIZE,
// and the dynamic shared allocation must hold at least
// blockDim.x * OUTPUT_PER_THREAD + k ints.
__global__ void k_stencil (int *A, int *B, int sizeOfA)
{
extern __shared__ int s[];
// Id of thread in the block.
int localId = threadIdx.x;
// The first index of output element computed by this block.
int startOfBlock = blockIdx.x * blockDim.x * OUTPUT_PER_THREAD;
// The Id of the thread in the scope of the grid.
int globalId = localId + startOfBlock;
// NOTE(review): threads past the end of A return here, BEFORE the
// __syncthreads() below. On a block that straddles the array end this
// makes the barrier divergent, which is undefined behavior -- confirm the
// launch always covers A exactly, or guard the loads instead of returning.
if (globalId >= sizeOfA)
return;
// Fetching into shared memory.
// Each thread stages OUTPUT_PER_THREAD elements, strided by BLOCK_SIZE.
for (int i = 0 ; i < OUTPUT_PER_THREAD ; ++i)
{
if (globalId + i*BLOCK_SIZE < sizeOfA)
{
s[localId + i*BLOCK_SIZE] = A[globalId + i*BLOCK_SIZE];
}
}
// The first k threads also stage the halo past the block's window.
if (localId < k && blockDim.x*OUTPUT_PER_THREAD + globalId < sizeOfA)
{
s[localId + blockDim.x*OUTPUT_PER_THREAD] = A[blockDim.x*OUTPUT_PER_THREAD + globalId];
}
// We must sync before reading from shared memory.
__syncthreads();
int sum = 0;
for (int j = 0 ; j < OUTPUT_PER_THREAD ; ++j)
{
sum = 0;
// The last k positions have no full stencil window; stop there.
if (globalId + j*BLOCK_SIZE >= sizeOfA - k)
return;
for (int i = 0 ; i < k + 1 ; ++i)
{
sum += s[localId + j*BLOCK_SIZE + i];
}
B[globalId + BLOCK_SIZE*j] = sum ;
}
}
5,980 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <iostream>
// Element-wise vector addition c = a + b with a tail guard so partial
// blocks past index n-1 do nothing.
__global__ void mykernel(int *a, int *b, int *c, int n)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
}
// Fills p with n pseudo-random values (rand()/100), echoes them to stdout,
// and returns the same pointer for call-chaining convenience.
int* genVector(int *p, int n)
{
    std::cout << " Vector : " ;
    for (int i = 0; i < n; i++)
    {
        const int value = rand()/100;
        p[i] = value;
        std::cout << value << " ";
    }
    std::cout << "" << std::endl;
    return p;
}
// Wraps a CUDA API call so failures are reported with their source location.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the error string for a failed CUDA call and, unless abort is
// disabled, terminates the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Host driver: generates two random vectors, adds them on the GPU with
// mykernel, and prints the result. Returns nonzero on allocation failure.
int main(void)
{
    int *h_a, *h_b, *h_c;
    int *d_a, *d_b, *d_c;
    int n = 16;
    int NUM_THREADS = 16;
    // Bug fix: the original wrote (int)ceil(n + NUM_THREADS+1)/NUM_THREADS,
    // i.e. ceil() of an already-integral sum followed by integer division,
    // which over-allocates blocks. The standard ceiling division launches
    // exactly enough blocks to cover n elements.
    int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
    std::size_t bytes = sizeof(int)*n;
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);
    // Robustness fix: the original never checked the host allocations.
    if (!h_a || !h_b || !h_c)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    gpuErrchk(cudaMalloc(&d_a, bytes));
    gpuErrchk(cudaMalloc(&d_b, bytes));
    gpuErrchk(cudaMalloc(&d_c, bytes));
    genVector(h_a, n);
    genVector(h_b, n);
    gpuErrchk(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));
    // Note: the original also copied the uninitialized h_c buffer to d_c;
    // the kernel overwrites every element of d_c, so that transfer is not
    // needed and has been removed.
    mykernel <<<NUM_BLOCKS, NUM_THREADS >>>(d_a, d_b, d_c, n);
    gpuErrchk(cudaGetLastError()); // catch bad launch configurations
    // This blocking copy also synchronizes with the kernel.
    gpuErrchk(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));
    std::cout << "result : ";
    for (int i = 0; i < n; i++)
        std::cout << (h_c[i]) << " ";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
5,981 | /*
EE 451
Course Project: Raytracer
Serial Version
Names: James Lee, Darwin Mendyke, Ahsan Zaman
*/
#include <stdlib.h>
#include <cmath>
#include <iostream>
#include <fstream>
#include <vector>
#include <string.h>
#include <time.h>
using namespace std;
#define MAX_TRIANGLES 2000
#define MAX_SPHERES 10
#define MAX_LIGHTS 10
char *filename=0;
//bool done = false;
//you may want to make these smaller for debugging purposes
#define WIDTH 640
#define HEIGHT 480
//the field of view of the camera
#define fov 60.0
unsigned char buffer[HEIGHT][WIDTH][3];
// A shaded mesh vertex: position plus per-vertex material attributes that
// are barycentrically interpolated across each triangle during shading.
struct Vertex
{
double position[3];
double color_diffuse[3];
double color_specular[3];
double normal[3];
double shininess;
};
// A triangle is simply three shaded vertices.
typedef struct _Triangle
{
struct Vertex v[3];
} Triangle;
// An analytic sphere with a single uniform material.
typedef struct _Sphere
{
double position[3];
double color_diffuse[3];
double color_specular[3];
double shininess;
double radius;
} Sphere;
// A point light with an RGB color.
typedef struct _Light
{
double position[3];
double color[3];
} Light;
// Scales p in place to unit length. The length is floored at 0.001 so
// degenerate (near-zero) vectors never cause a division blow-up.
__device__
void normalize(double p[3])
{
    const double length = fmax(sqrt(pow(p[0], 2.0) + pow(p[1], 2.0) + pow(p[2], 2.0)), 0.001);
    p[0] /= length;
    p[1] /= length;
    p[2] /= length;
}
// Component-wise 3-vector difference: result = v0 - v1.
__device__
void subtract(double v0[3], double v1[3], double result[3])
{
    result[0] = v0[0] - v1[0];
    result[1] = v0[1] - v1[1];
    result[2] = v0[2] - v1[2];
}
// Scales a 3-vector by a scalar: result = s * v.
__device__
void multiply(double v[3], double s, double result[3])
{
    result[0] = v[0] * s;
    result[1] = v[1] * s;
    result[2] = v[2] * s;
}
// Right-handed 3-vector cross product: result = v0 x v1.
// Stores directly into result in index order, matching the original's
// aliasing behavior.
__device__
void cross(double v0[3], double v1[3], double result[3])
{
    result[0] = (v0[1] * v1[2]) - (v1[1] * v0[2]);
    result[1] = (v1[0] * v0[2]) - (v0[0] * v1[2]);
    result[2] = (v0[0] * v1[1]) - (v1[0] * v0[1]);
}
// Scalar (dot) product of two 3-vectors.
__device__
double dot(double v0[3], double v1[3])
{
    double sum = (v0[0] * v1[0]) + (v0[1] * v1[1]);
    sum += (v0[2] * v1[2]);
    return sum;
}
// Compares the distances of inter and newInter from the origin point o
// (both floored at 0.001). If newInter is strictly closer, it is copied
// into inter and true is returned; otherwise inter is left untouched.
__device__
bool compDistances(double o[3], double (&inter)[3], double (&newInter)[3])
{
    double dOld[3] = {inter[0] - o[0], inter[1] - o[1], inter[2] - o[2]};
    double dNew[3] = {newInter[0] - o[0], newInter[1] - o[1], newInter[2] - o[2]};
    const double lenOld = fmax(sqrt(pow(dOld[0], 2.0) + pow(dOld[1], 2.0) + pow(dOld[2], 2.0)), 0.001);
    const double lenNew = fmax(sqrt(pow(dNew[0], 2.0) + pow(dNew[1], 2.0) + pow(dNew[2], 2.0)), 0.001);
    // Negated comparison so NaN distances behave exactly like the original
    // (no replacement).
    if (!(lenNew < lenOld))
        return false;
    inter[0] = newInter[0];
    inter[1] = newInter[1];
    inter[2] = newInter[2];
    return true;
}
// Ray/sphere intersection for the ray o + t*d (d assumed unit length, so
// the quadratic's leading coefficient is 1). When the nearest positive hit
// is closer than the current `intersection`, it is stored there (via
// compDistances) and true is returned.
__device__
bool intersectsSphere(Sphere sphere, double o[3], double d[3], double (&intersection)[3])
{
    double b = 2 * (d[0] * (o[0] - sphere.position[0]) + d[1] * (o[1] - sphere.position[1]) + d[2] * (o[2] - sphere.position[2]));
    double c = pow((o[0] - sphere.position[0]), 2.0) + pow((o[1] - sphere.position[1]), 2.0) + pow((o[2] - sphere.position[2]), 2.0) - pow(sphere.radius, 2.0);
    // Bug fix: the original took sqrt of a possibly negative discriminant,
    // producing NaN roots that fell through the comparisons below. A
    // negative discriminant simply means the ray misses the sphere.
    double disc = pow(b, 2.0) - 4.0 * c;
    if (disc < 0) return false;
    double t0 = (-b + sqrt(disc)) / 2.0;
    double t1 = (-b - sqrt(disc)) / 2.0;
    double t = fmin(t0, t1);
    if (t > 0)
    {
        double newIntersection[3] = {o[0] + t * d[0], o[1] + t * d[1], o[2] + t * d[2]};
        if (compDistances(o, intersection, newIntersection)) return true;
    }
    return false;
}
// Moller-Trumbore ray/triangle intersection. On a hit that is closer than
// the current `intersection`, stores the hit point (via compDistances) and
// the barycentric weights (u, v, 1-u-v) in bcoords, returning true.
__device__
bool intersectsTriangle(Triangle triangle, double o[3], double d[3], double (&intersection)[3], double (&bcoords)[3])
{
    double v0v1[3], v0v2[3], pvec[3], tvec[3], qvec[3], t, u, v;
    subtract(triangle.v[1].position, triangle.v[0].position, v0v1);
    subtract(triangle.v[2].position, triangle.v[0].position, v0v2);
    cross(d, v0v2, pvec);
    double det = dot(v0v1, pvec);
    // Bug fix: the original divided by det unconditionally; for rays
    // (nearly) parallel to the triangle plane det ~ 0 and the division
    // produced inf/NaN coordinates. Treat such rays as misses.
    if (fabs(det) < 1e-12) return false;
    double invDet = 1 / det;
    subtract(o, triangle.v[0].position, tvec);
    u = dot(tvec, pvec) * invDet;
    if (u < 0 || u > 1) return false;
    cross(tvec, v0v1, qvec);
    v = dot(d, qvec) * invDet;
    if (v < 0 || u + v > 1) return false;
    t = dot(v0v2, qvec) * invDet;
    if (t <= 0) return false; // checks if intersection is in positive ray direction or negative
    double newIntersection[3] = {o[0] + t * d[0], o[1] + t * d[1], o[2] + t * d[2]};
    if (compDistances(o, intersection, newIntersection))
    {
        bcoords[0] = u;
        bcoords[1] = v;
        bcoords[2] = 1.0 - bcoords[0] - bcoords[1];
        return true;
    }
    return false;
}
// Recursive function to perform ray tracing given a ray.
// trace3 is the recursion-terminating variant of the tracer (its own
// recursive call is commented out below); trace2 calls it for the final
// bounce. Writes the RGB result into trace_result.
__device__
void trace3(double o[3],double d[3],int num,double* trace_result,Triangle* triangles,Sphere* spheres,Light* lights,double* ambient_light,int* num_triangles,int* num_spheres,int* num_lights){
bool intersectTriangle = false, intersectSphere = false;
// intersection starts far away; compDistances keeps the closest hit.
double bcoords[3], other1[3], intersection[3] = {1000.0, 1000.0, 1000.0};
int index = 0;
// looks for an intersection between the input ray and a shape
for (int i = 0; i < num_triangles[0]; i++) if (intersectsTriangle(triangles[i], o, d, intersection, bcoords)) intersectTriangle = true, index = i;
// Spheres are tested second, so on an equally-close hit a sphere wins.
for (int i = 0; i < num_spheres[0]; i++) if (intersectsSphere(spheres[i], o, d, intersection)) intersectSphere = true, index = i;
// returns background color if no intersections or if reach max recursive call
if ((!intersectTriangle && !intersectSphere) || num > 2){
trace_result[0] = 1.0;
trace_result[1] = 1.0;
trace_result[2] = 1.0;
return;
}
double illumination[3] = {ambient_light[0], ambient_light[1], ambient_light[2]};
// NOTE(review): n and specular are written only inside the light loop; if
// num_lights[0] == 0 they are read uninitialized after the loop -- confirm
// scenes always contain at least one light.
double l[3], n[3], n1[3], v[3], r[3], recursive_r[3], diffuse[3], specular[3], shiny;
// iterates through each light in the scene
for (int j = 0; j < num_lights[0]; j++)
{
for (int i = 0; i < 3; i++)
{
l[i] = lights[j].position[i] - intersection[i];
v[i] = o[i] - intersection[i];
}
if (intersectSphere)
{
// Sphere: normal points from center to hit; material is uniform.
for (int i = 0; i < 3; i++)
{
n[i] = intersection[i] - spheres[index].position[i];
diffuse[i] = spheres[index].color_diffuse[i];
specular[i] = spheres[index].color_specular[i];
}
shiny = spheres[index].shininess;
}
else if (intersectTriangle)
{
// Triangle: interpolate normal/material barycentrically
// (bcoords = {u, v, 1-u-v}; vertex 0 is weighted by bcoords[2]).
Triangle shape = triangles[index];
for (int i = 0; i < 3; i++)
{
n[i] = shape.v[0].normal[i] * bcoords[2] + shape.v[1].normal[i] * bcoords[0] + shape.v[2].normal[i] * bcoords[1];
diffuse[i] = shape.v[0].color_diffuse[i] * bcoords[2] + shape.v[1].color_diffuse[i] * bcoords[0] + shape.v[2].color_diffuse[i] * bcoords[1];
specular[i] = shape.v[0].color_specular[i] * bcoords[2] + shape.v[1].color_specular[i] * bcoords[0] + shape.v[2].color_specular[i] * bcoords[1];
}
shiny = shape.v[0].shininess * bcoords[2] + shape.v[1].shininess * bcoords[0] + shape.v[2].shininess * bcoords[1];
}
// initializes the shadow ray from the intersection point
// (origin is nudged along the normal to avoid self-intersection).
double normalized_pos[3] = {lights[j].position[0] - intersection[0], lights[j].position[1] - intersection[1], lights[j].position[2] - intersection[2]};
double shadowIntersection[3] = {lights[j].position[0], lights[j].position[1], lights[j].position[2]};
double shadowOrigin[3] = {intersection[0] + 0.001 * n[0], intersection[1] + 0.001 * n[1], intersection[2] + 0.001 * n[2]};
bool shadow = false;
normalize(normalized_pos);
// checks if the shadow ray intersects with a shape
for (int i = 0; i < num_spheres[0]; i++)
if (intersectsSphere(spheres[i], shadowOrigin, normalized_pos, shadowIntersection)) shadow = true;
for (int i = 0; i < num_triangles[0]; i++)
if (intersectsTriangle(triangles[i], shadowOrigin, normalized_pos, shadowIntersection, other1)) shadow = true;
normalize(l);
normalize(n);
normalize(v);
// r = 2(l.n)n - l: mirror of the light direction about the normal.
multiply(n, 2 * dot(l, n), n1);
subtract(n1, l, r);
normalize(r); // calculates the reflection ray
// if there is no shadow at the point, calculates illumination using phong shading equation
if (!shadow)
{
for (int i = 0; i < 3; i++)
{
double a = diffuse[i] * fmax(0.0, dot(l, n));
double b = specular[i] * pow(fmax(0.0, dot(v, r)), shiny);
illumination[i] += lights[j].color[i] * (a + b);
illumination[i] = fmin(illumination[i], 1.0);
}
}
}
// uncomment this code below to recursively call tracer function on reflection ray
multiply(n, 2 * dot(v, n), n1);
subtract(n1, v, recursive_r);
//double recursiveOrigin[3] = {intersection[0] + 0.01 * recursive_r[0], intersection[1] + 0.01 * recursive_r[1], intersection[2] + 0.01 * recursive_r[2]};
normalize(recursive_r);
//double recurse_result[3] = {0,0,0};
// trace(recursiveOrigin,recursive_r,++num,recurse_result,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
// With recursion disabled, the specular weight contributes its full value
// (as if the reflected ray had returned white).
for (int i = 0; i < 3; i++)
{
trace_result[i] = (1 - specular[i]) * illumination[i] + specular[i];//* recurse_result[i];
}
}
// Recursive function to perform ray tracing given a ray.
// trace2 is the middle level of the manually-unrolled recursion: identical
// to trace3 except that it traces one further reflection bounce by calling
// trace3 at the end. Writes the RGB result into trace_result.
__device__
void trace2(double o[3],double d[3],int num,double* trace_result,Triangle* triangles,Sphere* spheres,Light* lights,double* ambient_light,int* num_triangles,int* num_spheres,int* num_lights){
bool intersectTriangle = false, intersectSphere = false;
// intersection starts far away; compDistances keeps the closest hit.
double bcoords[3], other1[3], intersection[3] = {1000.0, 1000.0, 1000.0};
int index = 0;
// looks for an intersection between the input ray and a shape
for (int i = 0; i < num_triangles[0]; i++) if (intersectsTriangle(triangles[i], o, d, intersection, bcoords)) intersectTriangle = true, index = i;
// Spheres are tested second, so on an equally-close hit a sphere wins.
for (int i = 0; i < num_spheres[0]; i++) if (intersectsSphere(spheres[i], o, d, intersection)) intersectSphere = true, index = i;
// returns background color if no intersections or if reach max recursive call
if ((!intersectTriangle && !intersectSphere) || num > 2){
trace_result[0] = 1.0;
trace_result[1] = 1.0;
trace_result[2] = 1.0;
return;
}
double illumination[3] = {ambient_light[0], ambient_light[1], ambient_light[2]};
// NOTE(review): n and specular are written only inside the light loop; if
// num_lights[0] == 0 they are read uninitialized after the loop -- confirm
// scenes always contain at least one light.
double l[3], n[3], n1[3], v[3], r[3], recursive_r[3], diffuse[3], specular[3], shiny;
// iterates through each light in the scene
for (int j = 0; j < num_lights[0]; j++)
{
for (int i = 0; i < 3; i++)
{
l[i] = lights[j].position[i] - intersection[i];
v[i] = o[i] - intersection[i];
}
if (intersectSphere)
{
// Sphere: normal points from center to hit; material is uniform.
for (int i = 0; i < 3; i++)
{
n[i] = intersection[i] - spheres[index].position[i];
diffuse[i] = spheres[index].color_diffuse[i];
specular[i] = spheres[index].color_specular[i];
}
shiny = spheres[index].shininess;
}
else if (intersectTriangle)
{
// Triangle: interpolate normal/material barycentrically
// (bcoords = {u, v, 1-u-v}; vertex 0 is weighted by bcoords[2]).
Triangle shape = triangles[index];
for (int i = 0; i < 3; i++)
{
n[i] = shape.v[0].normal[i] * bcoords[2] + shape.v[1].normal[i] * bcoords[0] + shape.v[2].normal[i] * bcoords[1];
diffuse[i] = shape.v[0].color_diffuse[i] * bcoords[2] + shape.v[1].color_diffuse[i] * bcoords[0] + shape.v[2].color_diffuse[i] * bcoords[1];
specular[i] = shape.v[0].color_specular[i] * bcoords[2] + shape.v[1].color_specular[i] * bcoords[0] + shape.v[2].color_specular[i] * bcoords[1];
}
shiny = shape.v[0].shininess * bcoords[2] + shape.v[1].shininess * bcoords[0] + shape.v[2].shininess * bcoords[1];
}
// initializes the shadow ray from the intersection point
// (origin is nudged along the normal to avoid self-intersection).
double normalized_pos[3] = {lights[j].position[0] - intersection[0], lights[j].position[1] - intersection[1], lights[j].position[2] - intersection[2]};
double shadowIntersection[3] = {lights[j].position[0], lights[j].position[1], lights[j].position[2]};
double shadowOrigin[3] = {intersection[0] + 0.001 * n[0], intersection[1] + 0.001 * n[1], intersection[2] + 0.001 * n[2]};
bool shadow = false;
normalize(normalized_pos);
// checks if the shadow ray intersects with a shape
for (int i = 0; i < num_spheres[0]; i++)
if (intersectsSphere(spheres[i], shadowOrigin, normalized_pos, shadowIntersection)) shadow = true;
for (int i = 0; i < num_triangles[0]; i++)
if (intersectsTriangle(triangles[i], shadowOrigin, normalized_pos, shadowIntersection, other1)) shadow = true;
normalize(l);
normalize(n);
normalize(v);
// r = 2(l.n)n - l: mirror of the light direction about the normal.
multiply(n, 2 * dot(l, n), n1);
subtract(n1, l, r);
normalize(r); // calculates the reflection ray
// if there is no shadow at the point, calculates illumination using phong shading equation
if (!shadow)
{
for (int i = 0; i < 3; i++)
{
double a = diffuse[i] * fmax(0.0, dot(l, n));
double b = specular[i] * pow(fmax(0.0, dot(v, r)), shiny);
illumination[i] += lights[j].color[i] * (a + b);
illumination[i] = fmin(illumination[i], 1.0);
}
}
}
// uncomment this code below to recursively call tracer function on reflection ray
// Reflect the view direction about the normal and trace one more bounce
// via trace3 (the terminating variant).
multiply(n, 2 * dot(v, n), n1);
subtract(n1, v, recursive_r);
double recursiveOrigin[3] = {intersection[0] + 0.01 * recursive_r[0], intersection[1] + 0.01 * recursive_r[1], intersection[2] + 0.01 * recursive_r[2]};
normalize(recursive_r);
double recurse_result[3] = {0,0,0};
trace3(recursiveOrigin,recursive_r,++num,recurse_result,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
// Blend local illumination with the reflected color by specular weight.
for (int i = 0; i < 3; i++)
{
trace_result[i] = (1 - specular[i]) * illumination[i] + specular[i] * recurse_result[i];
}
}
// Recursive function to perform ray tracing given a ray.
//   o, d          : ray origin and direction
//   num           : recursion depth; rays deeper than 2 return the background
//   trace_result  : output RGB, each channel in [0,1]
//   num_triangles / num_spheres / num_lights : single-element count arrays
// NOTE(review): n, v and specular are read after the light loop (for the
// reflection ray) even when num_lights[0] == 0, in which case they would be
// uninitialized — confirm scenes always contain at least one light.
__device__
void trace(double o[3],double d[3],int num,double* trace_result,Triangle* triangles,Sphere* spheres,Light* lights,double* ambient_light,int* num_triangles,int* num_spheres,int* num_lights){
bool intersectTriangle = false, intersectSphere = false;
double bcoords[3], other1[3], intersection[3] = {1000.0, 1000.0, 1000.0};
int index = 0;
// looks for an intersection between the input ray and a shape
// NOTE(review): no explicit depth comparison here — presumably
// intersectsTriangle/intersectsSphere only update `intersection` for closer
// hits; confirm in their definitions.
for (int i = 0; i < num_triangles[0]; i++) if (intersectsTriangle(triangles[i], o, d, intersection, bcoords)) intersectTriangle = true, index = i;
for (int i = 0; i < num_spheres[0]; i++) if (intersectsSphere(spheres[i], o, d, intersection)) intersectSphere = true, index = i;
// returns background color if no intersections or if reach max recursive call
if ((!intersectTriangle && !intersectSphere) || num > 2){
trace_result[0] = 1.0;
trace_result[1] = 1.0;
trace_result[2] = 1.0;
return;
}
// start from the ambient term; each light adds its diffuse/specular share
double illumination[3] = {ambient_light[0], ambient_light[1], ambient_light[2]};
double l[3], n[3], n1[3], v[3], r[3], recursive_r[3], diffuse[3], specular[3], shiny;
// iterates through each light in the scene
for (int j = 0; j < num_lights[0]; j++)
{
// l = vector toward the light, v = vector back toward the ray origin
for (int i = 0; i < 3; i++)
{
l[i] = lights[j].position[i] - intersection[i];
v[i] = o[i] - intersection[i];
}
if (intersectSphere)
{
// sphere normal points from the center through the hit point;
// material properties are uniform over the sphere
for (int i = 0; i < 3; i++)
{
n[i] = intersection[i] - spheres[index].position[i];
diffuse[i] = spheres[index].color_diffuse[i];
specular[i] = spheres[index].color_specular[i];
}
shiny = spheres[index].shininess;
}
else if (intersectTriangle)
{
Triangle shape = triangles[index];
// barycentric interpolation of the per-vertex attributes
// NOTE(review): the weights are rotated (bcoords[2] pairs with v[0]) —
// presumably this matches intersectsTriangle's output ordering; confirm.
for (int i = 0; i < 3; i++)
{
n[i] = shape.v[0].normal[i] * bcoords[2] + shape.v[1].normal[i] * bcoords[0] + shape.v[2].normal[i] * bcoords[1];
diffuse[i] = shape.v[0].color_diffuse[i] * bcoords[2] + shape.v[1].color_diffuse[i] * bcoords[0] + shape.v[2].color_diffuse[i] * bcoords[1];
specular[i] = shape.v[0].color_specular[i] * bcoords[2] + shape.v[1].color_specular[i] * bcoords[0] + shape.v[2].color_specular[i] * bcoords[1];
}
shiny = shape.v[0].shininess * bcoords[2] + shape.v[1].shininess * bcoords[0] + shape.v[2].shininess * bcoords[1];
}
// initializes the shadow ray from the intersection point
// (origin is nudged 0.001 along the normal to avoid self-intersection)
double normalized_pos[3] = {lights[j].position[0] - intersection[0], lights[j].position[1] - intersection[1], lights[j].position[2] - intersection[2]};
double shadowIntersection[3] = {lights[j].position[0], lights[j].position[1], lights[j].position[2]};
double shadowOrigin[3] = {intersection[0] + 0.001 * n[0], intersection[1] + 0.001 * n[1], intersection[2] + 0.001 * n[2]};
bool shadow = false;
normalize(normalized_pos);
// checks if the shadow ray intersects with a shape
for (int i = 0; i < num_spheres[0]; i++)
if (intersectsSphere(spheres[i], shadowOrigin, normalized_pos, shadowIntersection)) shadow = true;
for (int i = 0; i < num_triangles[0]; i++)
if (intersectsTriangle(triangles[i], shadowOrigin, normalized_pos, shadowIntersection, other1)) shadow = true;
normalize(l);
normalize(n);
normalize(v);
// r = 2(l.n)n - l : mirror reflection of the light direction about the normal
multiply(n, 2 * dot(l, n), n1);
subtract(n1, l, r);
normalize(r); // calculates the reflection ray
// if there is no shadow at the point, calculates illumination using phong shading equation
if (!shadow)
{
for (int i = 0; i < 3; i++)
{
double a = diffuse[i] * fmax(0.0, dot(l, n));
double b = specular[i] * pow(fmax(0.0, dot(v, r)), shiny);
illumination[i] += lights[j].color[i] * (a + b);
illumination[i] = fmin(illumination[i], 1.0);
}
}
}
// reflection: mirror the view direction about the normal and recurse via
// trace2 (the next link of the manually-unrolled recursion chain)
multiply(n, 2 * dot(v, n), n1);
subtract(n1, v, recursive_r);
double recursiveOrigin[3] = {intersection[0] + 0.01 * recursive_r[0], intersection[1] + 0.01 * recursive_r[1], intersection[2] + 0.01 * recursive_r[2]};
normalize(recursive_r);
double recurse_result[3] = {0,0,0};
trace2(recursiveOrigin,recursive_r,++num,recurse_result,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
// blend local illumination with the reflected color, weighted by specular
for (int i = 0; i < 3; i++)
{
trace_result[i] = (1 - specular[i]) * illumination[i] + specular[i] * recurse_result[i];
}
}
// Kernel entry point: one thread per pixel. Casts a primary ray through the
// pixel at (x, y) and stores the traced color (scaled to 0..255) into the
// interleaved RGB output buffer.
__global__ void draw_scene(double* result,Triangle* triangles,Sphere* spheres,Light* lights,double* ambient_light,int* num_triangles,int* num_spheres,int* num_lights)
{
    // distance from the eye at the origin to the image plane
    double focalLength = 0.5 * WIDTH * sqrt(3.0) * 0.75;
    double origin[3] = {0, 0, 0};
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // primary ray through pixel (x, y), looking down the -z axis
    double direction[3] = {x - ((double) WIDTH / 2.0), y - ((double) HEIGHT / 2.0), -1 * focalLength};
    normalize(direction);
    double color[3] = {0,0,0};
    trace(origin,direction,0,color,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
    for (int ch = 0; ch < 3; ch++)
        result[(WIDTH*y + x)*3 + ch] = color[ch]*255;
}
// Aborts the program when the token read from the scene file does not match
// the expected keyword. The comparison is case-insensitive.
void parse_check(char *expected,char *found)
{
    if (strcasecmp(expected, found) == 0)
        return;
    printf("Expected '%s ' found '%s '\n",expected,found);
    printf("Parse error, abnormal abortion\n");
    exit(0);
}
// Reads one whitespace-delimited token from `file`, verifies it equals
// `check` (aborting on mismatch), then reads three doubles into p.
void parse_doubles(FILE*file, char *check, double p[3])
{
    char token[100];
    fscanf(file, "%s", token);
    parse_check(check, token);
    fscanf(file, "%lf %lf %lf", &p[0], &p[1], &p[2]);
}
// Reads a "rad:" keyword followed by one double (a sphere radius) into *r.
void parse_rad(FILE*file,double *r)
{
    char token[100];
    fscanf(file, "%s", token);
    parse_check((char *)"rad:", token);
    fscanf(file, "%lf", r);
}
// Reads a "shi:" keyword followed by one double (a shininess value) into *shi.
void parse_shi(FILE*file,double *shi)
{
    char token[100];
    fscanf(file, "%s", token);
    parse_check((char *)"shi:", token);
    fscanf(file, "%lf", shi);
}
// Parses the scene description file `argv` into the supplied (pre-allocated)
// arrays. The num_* out-parameters are single-element counters incremented as
// objects are read. Returns 0 on success; exits on parse errors (consistent
// with the parse_* helpers).
int loadScene(char *argv,Triangle* triangles,Sphere* spheres,Light* lights,double* ambient_light,int* num_triangles,int* num_spheres,int* num_lights)
{
    FILE *file = fopen(argv,"r");
    // fix: a missing/unreadable file previously caused a NULL dereference in fscanf
    if (file == NULL)
    {
        printf("could not open scene file\n");
        exit(0);
    }
    int number_of_objects;
    char type[50];
    int i;
    Triangle t;
    Sphere s;
    Light l;
    fscanf(file,"%i",&number_of_objects);
    parse_doubles(file,(char *)"amb:",ambient_light);
    for(i=0;i < number_of_objects;i++)
    {
        fscanf(file,"%s\n",type);
        if(strcasecmp(type,"triangle")==0)
        {
            // three vertices, each with position/normal/diffuse/specular/shininess
            int j;
            for(j=0;j < 3;j++)
            {
                parse_doubles(file,(char *)"pos:",t.v[j].position);
                parse_doubles(file,(char *)"nor:",t.v[j].normal);
                parse_doubles(file,(char *)"dif:",t.v[j].color_diffuse);
                parse_doubles(file,(char *)"spe:",t.v[j].color_specular);
                parse_shi(file,&t.v[j].shininess);
            }
            if(num_triangles[0] == MAX_TRIANGLES)
            {
                printf("too many triangles, you should increase MAX_TRIANGLES!\n");
                exit(0);
            }
            triangles[num_triangles[0]++] = t;
        }
        else if(strcasecmp(type,"sphere")==0)
        {
            parse_doubles(file,(char *)"pos:",s.position);
            parse_rad(file,&s.radius);
            parse_doubles(file,(char *)"dif:",s.color_diffuse);
            parse_doubles(file,(char *)"spe:",s.color_specular);
            parse_shi(file,&s.shininess);
            if(num_spheres[0] == MAX_SPHERES) {
                printf("too many spheres, you should increase MAX_SPHERES!\n");
                exit(0);
            }
            spheres[num_spheres[0]++] = s;
        }
        else if(strcasecmp(type,"light")==0)
        {
            parse_doubles(file,(char *)"pos:",l.position);
            parse_doubles(file,(char *)"col:",l.color);
            if(num_lights[0] == MAX_LIGHTS){
                printf("too many lights, you should increase MAX_LIGHTS!\n");
                exit(0);
            }
            lights[num_lights[0]++] = l;
        }
        else{
            printf("unknown type in scene description:\n%s\n",type);
            exit(0);
        }
    }
    fclose(file);
    return 0;
}
// Writes the WIDTH x HEIGHT image in `rgbVals` (doubles, one 0..255 value per
// channel, interleaved RGB, bottom-up row order) to `fileToWrite` as a 24-bit
// uncompressed BMP.
// NOTE(review): rows are written without padding, which is only a valid BMP
// when WIDTH*3 is a multiple of 4 — confirm WIDTH satisfies this.
void make_bitmap(double* rgbVals, char* fileToWrite)
{
    typedef struct /**** BMP file header structure ****/
    {
        unsigned int bfSize; /* Size of file */
        unsigned short bfReserved1; /* Reserved */
        unsigned short bfReserved2; /* ... */
        unsigned int bfOffBits; /* Offset to bitmap data */
    } BITMAPFILEHEADER;
    typedef struct /**** BMP file info structure ****/
    {
        unsigned int biSize; /* Size of info header */
        int biWidth; /* Width of image */
        int biHeight; /* Height of image */
        unsigned short biPlanes; /* Number of color planes */
        unsigned short biBitCount; /* Number of bits per pixel */
        unsigned int biCompression; /* Type of compression to use */
        unsigned int biSizeImage; /* Size of image data */
        int biXPelsPerMeter; /* X pixels per meter */
        int biYPelsPerMeter; /* Y pixels per meter */
        unsigned int biClrUsed; /* Number of colors used */
        unsigned int biClrImportant; /* Number of important colors */
    } BITMAPINFOHEADER;
    BITMAPFILEHEADER bfh;
    BITMAPINFOHEADER bih;
    /* Magic number for file. It does not fit in the header structure due to alignment requirements, so put it outside */
    unsigned short bfType=0x4d42;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    /* fix: file size previously hard-coded 640*480*3 regardless of the actual
       WIDTH/HEIGHT used for the pixel data and the info header */
    bfh.bfSize = 2+sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER)+WIDTH*HEIGHT*3;
    bfh.bfOffBits = 0x36;
    bih.biSize = sizeof(BITMAPINFOHEADER);
    bih.biWidth = WIDTH;
    bih.biHeight = HEIGHT;
    bih.biPlanes = 1;
    bih.biBitCount = 24;
    bih.biCompression = 0;
    bih.biSizeImage = 0;
    bih.biXPelsPerMeter = 5000;
    bih.biYPelsPerMeter = 5000;
    bih.biClrUsed = 0;
    bih.biClrImportant = 0;
    FILE *file = fopen(fileToWrite, "wb");
    if (!file)
    {
        printf("Could not write file\n");
        return;
    }
    /*Write headers*/
    fwrite(&bfType,1,sizeof(bfType),file);
    fwrite(&bfh, 1, sizeof(bfh), file);
    fwrite(&bih, 1, sizeof(bih), file);
    /* Write pixel data: BMP stores channels in B, G, R order */
    for (int y=0; y<bih.biHeight; y++)
    {
        for (int x = 0; x < bih.biWidth; x++)
        {
            unsigned char r = rgbVals[(WIDTH*y + x)*3 + 0];
            unsigned char g = rgbVals[(WIDTH*y + x)*3 + 1];
            unsigned char b = rgbVals[(WIDTH*y + x)*3 + 2];
            fwrite(&b, 1, 1, file);
            fwrite(&g, 1, 1, file);
            fwrite(&r, 1, 1, file);
        }
    }
    fclose(file);
}
// Returns true iff `name` can be opened for reading.
inline bool exists_file(char* name){
    FILE *fp = fopen(name, "r");
    if (fp == NULL)
        return false;
    fclose(fp);
    return true;
}
// Renders the scene 16 times, with square thread blocks from 1x1 up to 16x16,
// timing the ray-tracing kernel for each block size and writing the final
// image to the requested BMP each pass.
int main (int argc, char ** argv)
{
    for(int i=1; i<17; i++){
        if (argc != 3){
            printf ("usage: %s <scenefile> <bmp_name>\n", argv[0]);
            exit(0);
        }
        char* fileToRead = argv[1];
        char* fileToWrite = argv[2];
        if(!exists_file(fileToRead)){
            cout << "Input file does not exist.\n" << endl;
            exit(0);
        }
        // unified-memory buffers shared between host (scene loading, bitmap
        // writing) and device (ray tracing)
        double* drawing;
        Triangle* triangles;
        Sphere* spheres;
        Light* lights;
        double* ambient_light;
        int* num_triangles;
        int* num_spheres;
        int* num_lights;
        cudaMallocManaged(&drawing, WIDTH*HEIGHT*3*sizeof(double));
        cudaMallocManaged(&triangles, MAX_TRIANGLES*sizeof(Triangle));
        cudaMallocManaged(&spheres, MAX_SPHERES*sizeof(Sphere));
        cudaMallocManaged(&lights, MAX_LIGHTS*sizeof(Light));
        cudaMallocManaged(&ambient_light, 3*sizeof(double));
        cudaMallocManaged(&num_triangles, sizeof(int));
        cudaMallocManaged(&num_spheres, sizeof(int));
        cudaMallocManaged(&num_lights, sizeof(int));
        loadScene(fileToRead,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
        // measure how long it takes to render the image
        double time;
        struct timespec start, stop;
        if( clock_gettime(CLOCK_REALTIME, &start) == -1) { perror("clock gettime");}
        // NOTE(review): WIDTH/i and HEIGHT/i truncate when i does not divide the
        // frame size, leaving edge pixels unrendered — consider ceil division
        // plus an in-kernel bounds check.
        dim3 BLOCK_DIM(i,i);
        dim3 GRID_DIM(WIDTH/i,HEIGHT/i);
        draw_scene<<<GRID_DIM, BLOCK_DIM>>>(drawing,triangles,spheres,lights,ambient_light,num_triangles,num_spheres,num_lights);
        cudaDeviceSynchronize();
        if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror("clock gettime");}
        time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9;
        // fix: argument order now matches the format string — previously `time`
        // was consumed by the first %d and `i` by %f (undefined behavior)
        printf("Execution time for %s with %d x %d blocks: %f seconds.\n",fileToRead, i, i, time);
        make_bitmap(drawing, fileToWrite);
        cudaFree(drawing);
        cudaFree(triangles);
        cudaFree(spheres);
        cudaFree(lights);
        cudaFree(ambient_light);
        cudaFree(num_triangles);
        cudaFree(num_spheres);
        cudaFree(num_lights);
    }
}
|
5,982 | #include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <cfloat>
//VERSION 0.8 MODIFIED 10/25/16 12:34 by Jack
// The number of threads per blocks in the kernel
// (if we define it here, then we can use its value in the kernel,
// for example to statically declare an array in shared memory)
const int threads_per_block = 256;
// Forward function declarations
float GPU_vector_max(float *A, int N, int kernel_code, float *kernel_time, float *transfer_time);
float CPU_vector_max(float *A, int N);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void checkError();
// Main program
// Parses CLI arguments, builds a random vector, computes its maximum on both
// GPU and CPU, and reports runtimes, relative speedups, and any mismatch.
int main(int argc, char **argv) {
    //default kernel
    int kernel_code = 1;
    // Parse vector length and kernel options
    int N;
    if(argc == 2) {
        N = atoi(argv[1]); // user-specified value
    } else if (argc == 4 && !strcmp(argv[2], "-k")) {
        N = atoi(argv[1]); // user-specified value
        kernel_code = atoi(argv[3]);
        printf("KERNEL_CODE %d\n", kernel_code);
    } else {
        die("USAGE: ./vector_max <vector_length> -k <kernel_code>");
    }
    // Seed the random generator (use a constant here for repeatable results)
    srand(10);
    // Generate a random vector
    // You can use "get_increasing_vector()" for debugging
    long long vector_start_time = start_timer();
    float *vec = get_random_vector(N);
    //float *vec = get_increasing_vector(N);
    stop_timer(vector_start_time, "Vector generation");
    // Compute the max on the GPU
    float GPU_kernel_time;
    float transfer_time;
    long long GPU_start_time = start_timer();
    float result_GPU = GPU_vector_max(vec, N, kernel_code, &GPU_kernel_time, &transfer_time);
    long long GPU_time = stop_timer(GPU_start_time, "\t Total");
    printf("\tTotal Kernel Time: %f sec\n", GPU_kernel_time);
    // Compute the max on the CPU
    long long CPU_start_time = start_timer();
    float result_CPU = CPU_vector_max(vec, N);
    long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
    // fix: vec comes from cudaMallocHost (pinned host memory), so it must be
    // released with cudaFreeHost — cudaFree expects a device pointer
    cudaFreeHost(vec);
    // Compute the speedup or slowdown
    //// Not including data transfer
    if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
    else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
    //// Including data transfer
    if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
    else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
    // Check the correctness of the GPU results
    int wrong = result_CPU != result_GPU;
    // Report the correctness results
    if(wrong) printf("GPU output %f did not match CPU output %f\n", result_GPU, result_CPU);
}
// GPU kernel computing the maximum of a vector: the lead thread of each block
// (threadIdx.x == 0) serially scans its block's slice of `in` and writes the
// slice maximum to out[block_id].
__global__ void vector_max_kernel(float *in, float *out, int N) {
    // Flattened block id and this thread's global element index
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;
    // Only the lead thread of each block does any work
    if (threadIdx.x != 0)
        return;
    // The last block may cover fewer than threads_per_block elements
    int span = threads_per_block;
    if (thread_id + threads_per_block > N)
        span = N - thread_id;
    // Serial scan of the block's slice, starting from the lead element
    float best = in[thread_id];
    for (int i = 1; i < span; i++) {
        float candidate = in[thread_id + i];
        if (candidate > best)
            best = candidate;
    }
    out[block_id] = best;
}
/////////////////////////////////////////////
// COPY KERNEL ONE AND CREATE NEW KERNELS HERE
/////////////////////////////////////////////
// Returns the maximum value within a vector of length N, computed on the GPU.
// Fills *kernel_runtime and *transfer_runtime with the respective times in
// seconds. Aborts (via die) on allocation failures or unimplemented kernels.
float GPU_vector_max(float *in_CPU, int N, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
    long long transfer_time = 0;
    long long kernel_time = 0;
    int vector_size = N * sizeof(float);
    // Launch geometry, computed up front so the output buffers can be sized to
    // one float per block (the kernel writes one partial max per block)
    int num_blocks = (int) ((float) (N + threads_per_block - 1) / (float) threads_per_block);
    int max_blocks_per_dimension = 65535;
    int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
    int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
    dim3 grid_size(num_blocks_x, num_blocks_y, 1);
    // Allocate pinned CPU memory for the per-block results
    // (fix: was vector_size * sizeof(float), i.e. N * 16 bytes — 4x too large
    // and scaled with N instead of the number of blocks)
    float *out_CPU;
    cudaMallocHost((void **) &out_CPU, num_blocks * sizeof(float));
    if (out_CPU == NULL) die("Error allocating CPU memory");
    // Allocate GPU memory for the input and the per-block results
    long long memory_start_time = start_timer();
    float *in_GPU, *out_GPU;
    if (cudaMalloc((void **) &in_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
    if (cudaMalloc((void **) &out_GPU, num_blocks * sizeof(float)) != cudaSuccess) die("Error allocating GPU memory");
    // Transfer the input vector to GPU memory
    cudaMemcpy(in_GPU, in_CPU, vector_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize(); // this is only needed for timing purposes
    transfer_time += stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");
    // Execute the requested kernel
    long long kernel_start_time;
    kernel_start_time = start_timer();
    switch(kernel_code){
    case 1 :
        vector_max_kernel <<< grid_size , threads_per_block >>> (in_GPU, out_GPU, N);
        break;
    case 2 :
        //LAUNCH KERNEL FROM PROBLEM 2 HERE
        die("KERNEL 2 NOT IMPLEMENTED YET\n");
        break;
    case 3 :
        //LAUNCH KERNEL FROM PROBLEM 3 HERE
        die("KERNEL 3 NOT IMPLEMENTED YET\n");
        break;
    case 4 :
        //LAUNCH KERNEL FROM PROBLEM 4 HERE
        die("KERNEL 4 NOT IMPLEMENTED YET\n");
        break;
    default :
        die("INVALID KERNEL CODE\n");
    }
    cudaDeviceSynchronize(); // this is only needed for timing purposes
    kernel_time += stop_timer(kernel_start_time, "\t Kernel execution");
    checkError();
    // Transfer the per-block results from the GPU to the CPU
    memory_start_time = start_timer();
    cudaMemcpy(out_CPU, out_GPU, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
    checkError();
    cudaDeviceSynchronize(); // this is only needed for timing purposes
    transfer_time += stop_timer(memory_start_time, "\tTransfer from GPU");
    // Free the GPU memory
    cudaFree(in_GPU);
    cudaFree(out_GPU);
    // Final host-side reduction over the per-block maxima
    // (fix: previously only out_CPU[0] was returned, i.e. just block 0's max)
    float max = out_CPU[0];
    for (int i = 1; i < num_blocks; i++)
        if (out_CPU[i] > max) max = out_CPU[i];
    // fix: pinned memory must be freed with cudaFreeHost, not cudaFree
    cudaFreeHost(out_CPU);
    // fill input pointers with runtimes (in seconds)
    *kernel_runtime = usToSec(kernel_time);
    *transfer_runtime = usToSec(transfer_time);
    return max;
}
// Returns the maximum value within a vector of length N (serial CPU scan).
// Precondition: N >= 1.
float CPU_vector_max(float *vec, int N) {
    float best = vec[0];
    for (int i = 1; i < N; ++i)
        best = (vec[i] > best) ? vec[i] : best;
    return best;
}
// Returns a pinned-host buffer of N pseudo-random floats, each the ratio of
// two rand() draws. Aborts (via die) on invalid N or allocation failure.
float *get_random_vector(int N) {
    if (N < 1) die("Number of elements must be greater than zero");
    float *V;
    cudaMallocHost((void **) &V, N * sizeof(float));
    if (V == NULL) die("Error allocating CPU memory");
    for (int i = 0; i < N; ++i)
        V[i] = (float) rand() / (float) rand();
    return V;
}
// Returns a pinned-host buffer holding 0.0f, 1.0f, ..., N-1 — a deterministic
// alternative to get_random_vector for debugging. Aborts via die on failure.
float *get_increasing_vector(int N) {
    if (N < 1) die("Number of elements must be greater than zero");
    float *V;
    cudaMallocHost((void **) &V, N * sizeof(float));
    if (V == NULL) die("Error allocating CPU memory");
    for (int i = 0; i < N; ++i)
        V[i] = (float) i;
    return V;
}
// Reports and aborts on any pending CUDA error (e.g. a failed kernel launch).
void checkError() {
    cudaError_t error = cudaGetLastError();
    if (error) {
        char message[256];
        // fix: snprintf bounds the write — sprintf could overflow `message`
        // if the driver ever returns a long error string
        snprintf(message, sizeof(message), "CUDA error: %s", cudaGetErrorString(error));
        die(message);
    }
}
// Returns the current wall-clock time in microseconds.
long long start_timer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (long long) now.tv_sec * 1000000 + now.tv_usec;
}
// Converts a microsecond count to (float) seconds.
float usToSec(long long time) {
    return (float) time / 1000000.0f;
}
// Prints the seconds elapsed since `start_time` labelled with `name`, and
// returns the elapsed time in microseconds.
long long stop_timer(long long start_time, const char *name) {
    struct timeval now;
    gettimeofday(&now, NULL);
    long long end_time = now.tv_sec * 1000000 + now.tv_usec;
    long long elapsed_us = end_time - start_time;
    printf("%s: %.5f sec\n", name, usToSec(elapsed_us));
    return elapsed_us;
}
// Emits the diagnostic on stdout and terminates with a failure status.
void die(const char *message) {
    fputs(message, stdout);
    fputs("\n", stdout);
    exit(1);
}
|
5,983 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define BLOCK_SIZE 1024
// Element-wise c = a + b; one thread per element (single-block launch:
// indexing uses only threadIdx.x).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Doubles each of the first n elements of `data` in place; the guard handles
// partially filled final blocks.
__global__ void multiplyBy2(int* data, unsigned int n) {
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    data[i] += data[i];   // 2 * data[i]
}
// Returns a newly allocated, sorted copy of *input with duplicates removed.
// The caller owns (and must delete) the returned vector.
template<typename T>
std::vector<T>* getUniqueValues(std::vector<T>* input) {
    std::vector<T>* result = new std::vector<T>(*input);
    std::sort(result->begin(), result->end());
    result->erase(std::unique(result->begin(), result->end()), result->end());
    return result;
}
// Copies a std::vector into a newly allocated thrust::host_vector.
// The caller owns (and must delete) the returned vector.
template<typename T>
thrust::host_vector<T>* getHostVector(std::vector<T>* input) {
    thrust::host_vector<T>* out = new thrust::host_vector<T>();
    for (size_t i = 0; i < input->size(); ++i)
        out->push_back((*input)[i]);
    return out;
}
// Computes the number of k-permutations of n elements without repetition,
// n! / (n-k)!, into *result.
// Note: for k > n the function reports 1 (kept for compatibility with the
// existing callers), although mathematically there are 0 such variations.
__host__ __device__ void variations_without_repetitions_count(int n, int k, unsigned long long* result) {
    unsigned long long acc = 1;
    if (k <= n) {
        for (int i = n - k + 1; i <= n; ++i)
            acc *= (unsigned long long) i;
    }
    *result = acc;
}
// Writes the variationNumber-th k-permutation of {0..n-1} into result[0..k).
// Decoding works digit by digit: at each position the number of shorter
// variations (computed by variations_without_repetitions_count) determines
// which still-unused element comes next.
__host__ __device__ void variation(int n, int k, int variationNumber, int* result) {
    bool* isTaken = new bool[n];
    for (int i = 0; i < n; i++) {
        isTaken[i] = false;
    }
    for (int x = 0; x < k ; x++) {
        unsigned long long v = 0;
        variations_without_repetitions_count(n - x - 1, k - x - 1, &v);
        auto t = variationNumber / v;   // index among the not-yet-taken elements
        int searchedPosition = -1;
        int realPosition = 0;
        // find the t-th element that has not been used yet
        for (int i = 0; i < n; i++) {
            if (!isTaken[i]) {
                searchedPosition++;
                if (t == searchedPosition) {
                    realPosition = i;
                    break;
                }
            }
        }
        isTaken[realPosition] = true;
        result[x] = realPosition;
        variationNumber %= v;
    }
    // fix: the scratch buffer was previously leaked on every call
    delete[] isTaken;
}
// One thread per candidate letter->value substitution: thread `index` decodes
// the index-th variation (an assignment of each distinct pattern symbol to a
// distinct sequence value) and sets result[index] to 1 when the substituted
// pattern occurs as a subsequence of seq, else 0.
__global__ void findSubstitution(
char* patternValues, int patternValuesSize,
int* seqValues, int seqValuesSize,
char* pattern, int patternSize,
int* seq, int seqSize,
int* result, unsigned long long variationCount) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard was `index > variationCount`, letting thread index ==
    // variationCount decode a nonexistent variation and write
    // result[variationCount] out of bounds
    if (index >= variationCount) return;
    // decode this thread's symbol assignment
    int* variationResult = new int[patternValuesSize];
    variation(seqValuesSize, patternValuesSize, index, variationResult);
    // substitute the assigned values into the pattern
    int* patternWithValues = new int[patternSize];
    for (int i = 0; i < patternValuesSize; i++) {
        for (int j = 0; j < patternSize; j++) {
            if (patternValues[i] == pattern[j]) {
                patternWithValues[j] = seqValues[variationResult[i]];
            }
        }
    }
    // greedy subsequence match of the substituted pattern against seq
    int patternIndex = 0;
    for (int i = 0; i < seqSize && patternIndex < patternSize; i++) {
        if (seq[i] == patternWithValues[patternIndex]) {
            patternIndex++;
        }
    }
    result[index] = (patternIndex == patternSize) ? 1 : 0;
    // fix: device-heap buffers were leaked, exhausting the device heap across
    // repeated launches
    delete[] variationResult;
    delete[] patternWithValues;
}
int main()
{
    // Finds every assignment of pattern letters to sequence values for which
    // the substituted pattern occurs as a subsequence of `seq`, testing one
    // candidate assignment per GPU thread, then prints the matching mappings.
    std::vector<int> seq = { 1,2, 4, 3, 5, 3, 6, 2, 1 };
    std::vector<char> pattern = { 'a', 'b', 'b', 'a' };
    // keep the intermediate unique-value vectors so they can be freed
    // (fix: they were previously leaked)
    std::vector<char>* uniquePatternVals = getUniqueValues(&pattern);
    std::vector<int>* uniqueSeqVals = getUniqueValues(&seq);
    thrust::host_vector<char>* patternValues = getHostVector(uniquePatternVals);
    thrust::host_vector<char>* thrustPattern = getHostVector(&pattern);
    thrust::host_vector<int>* seqValues = getHostVector(uniqueSeqVals);
    thrust::host_vector<int>* thrustSeq = getHostVector(&seq);
    thrust::host_vector<int>* result = new thrust::host_vector<int>();
    thrust::device_vector<char>* devPatternValues = new thrust::device_vector<char>();
    thrust::device_vector<char>* devThrustPattern = new thrust::device_vector<char>();
    thrust::device_vector<int>* devSeqValues = new thrust::device_vector<int>();
    thrust::device_vector<int>* devThrustSeq = new thrust::device_vector<int>();
    thrust::device_vector<int>* devResult = new thrust::device_vector<int>();
    unsigned long long variationCount = 0;
    variations_without_repetitions_count(seqValues->size(), patternValues->size(), &variationCount);
    // fix: grid size must round UP so every variation gets a thread — the
    // previous floor division dropped up to BLOCK_SIZE-1 candidates
    int gridSize = (int)((variationCount + BLOCK_SIZE - 1) / BLOCK_SIZE);
    if (gridSize < 1) {
        gridSize = 1;
    }
    devPatternValues->resize(patternValues->size());
    devThrustPattern->resize(thrustPattern->size());
    devSeqValues->resize(seqValues->size());
    devThrustSeq->resize(thrustSeq->size());
    result->resize(variationCount);
    devResult->resize(variationCount);
    *devPatternValues = *patternValues;
    *devThrustPattern = *thrustPattern;
    *devSeqValues = *seqValues;
    *devThrustSeq = *thrustSeq;
    *devResult = *result;
    findSubstitution <<< gridSize, BLOCK_SIZE >>> (
        devPatternValues->data().get(), devPatternValues->size(),
        devSeqValues->data().get(), devSeqValues->size(),
        devThrustPattern->data().get(), devThrustPattern->size(),
        devThrustSeq->data().get(), devThrustSeq->size(),
        devResult->data().get(), variationCount);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "cuda error: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    // device->host thrust assignment synchronizes with the kernel
    *result = *devResult;
    for (int i = 0; i < result->size(); i++) {
        if ((*result)[i] != 0) {
            // re-decode the i-th variation on the host to print the mapping
            int* variationResult = new int[patternValues->size()];
            variation(seqValues->size(), patternValues->size(), i, variationResult);
            // fix: inner loop previously shadowed the outer `i`
            for (int j = 0; j < patternValues->size(); j++) {
                std::cout << (*patternValues)[j] << "=" << (*seqValues)[variationResult[j]] << " ";
            }
            std::cout << std::endl;
            delete[] variationResult;   // fix: was leaked for every match
        }
    }
    // release host-side containers (fix: previously all leaked)
    delete uniquePatternVals;
    delete uniqueSeqVals;
    delete patternValues;
    delete thrustPattern;
    delete seqValues;
    delete thrustSeq;
    delete result;
    delete devPatternValues;
    delete devThrustPattern;
    delete devSeqValues;
    delete devThrustSeq;
    delete devResult;
    return 0;
} |
5,984 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
// Averages three channel samples (integer division, truncating toward zero).
__device__ uint8_t merge_colors(uint8_t a, uint8_t b, uint8_t c){
    int total = a + b + c;
    return (uint8_t)(total / 3);
}
// Radial blur weight for a 1920x1080 frame: 0 at the frame center, rising
// toward 1 with squared distance (scaled by 800^2), clamped to at most 1.
__device__ float blur_effect(size_t x, size_t y) {
    float cx = 1920/2;
    float cy = 1080/2;
    float dx = x - cx;
    float dy = y - cy;
    float d2 = (dx*dx + dy*dy) / (800*800);
    float v = 1 - 1 / (1 + d2);
    return v > 1 ? 1 : v;
}
// One thread per byte of the channel-interleaved (3 bytes/pixel) frame:
// computes a distance-weighted elliptical blur of the merged (averaged) input
// frames a/b/c and blends the result with frame b according to `be`.
__global__ void process_color(size_t width, size_t height,
uint8_t *a, uint8_t *b, uint8_t *c, uint8_t *res) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: guard was `idx > width*3*height`, letting the element one past the
    // end read and write out of bounds
    if ((size_t)idx >= (width * 3 * height)) return;
    int x = idx % (width*3);   // byte column (channel-interleaved)
    int y = idx / (width*3);   // row
    // blur radius grows with distance from the frame center
    int radius = 40 * blur_effect(x/3, y);
    int sum = 0;
    int count = 0;
    // average merged samples inside an ellipse of `radius` pixels around (x, y);
    // the x step of 3 stays on the same channel
    for (int i = x - radius*3; i <= x + radius*3; i += 3) {
        for (int j = y - radius; j <= y + radius; j += 1) {
            if ((i < 0) || (i >= width*3) || (j < 0) || (j >= height)) {
                continue;
            }
            if((i-x)*(i-x)/9 + (j-y)*(j-y) > radius*radius){
                continue;
            }
            int l_idx = i + j * width * 3;
            sum += merge_colors(a[l_idx], b[l_idx], c[l_idx]);
            ++count;
        }
    }
    // blend factor: 1.0 means pure blurred average, 0.0 would pass b through
    // (count >= 1 because the center sample always passes both guards)
    float be = 1.f;
    res[idx] = (uint8_t)((sum / count) * be + b[idx] * (1 - be));
}
// Reads `size` bytes starting at file offset `start` from `path` into `data`.
// Exits the program if the file cannot be opened.
void read_image(const char *path, uint8_t *data,
size_t start, size_t size) {
    // fix: open in binary mode ("rb") — pixel data must not pass through
    // text-mode newline translation on platforms that distinguish the two
    FILE *fp = fopen(path, "rb");
    if (fp == NULL) {
        perror("Error while opening the file.\n");
        exit(EXIT_FAILURE);
    }
    fseek(fp, start, SEEK_SET);
    for (size_t i = 0; i < size; ++i) {
        data[i] = getc(fp);
    }
    fclose(fp);
}
// Overwrites `size` bytes at offset `start` in the existing file `path` with
// the contents of `data`. Exits on open or seek failure.
void write_image(const char *path, uint8_t *data,
size_t start, size_t size) {
    // fix: binary update mode ("r+b") so pixel bytes are written verbatim on
    // platforms that translate newlines in text mode
    FILE *fp = fopen(path, "r+b");
    if (fp == NULL) {
        perror("Error while opening the file.\n");
        exit(EXIT_FAILURE);
    }
    if (fseek(fp, start, SEEK_SET) != 0) {
        perror("Error while seeking.\n");
        exit(EXIT_FAILURE);
    }
    for (size_t i = 0; i < size; ++i) {
        putc(data[i], fp);
    }
    fclose(fp);
}
// Copies the first `till` bytes of `from` into a freshly created/truncated
// file `to`. Exits on open or seek failure.
void copy_header(const char *from, const char *to, size_t till) {
    FILE *src = fopen(from, "r");
    FILE *dst = fopen(to, "w");
    if (src == NULL || dst == NULL) {
        perror("Error while opening the file.\n");
        exit(EXIT_FAILURE);
    }
    if (fseek(dst, 0, SEEK_SET) != 0) {
        perror("Error while seeking.\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < till; ++i) {
        putc(getc(src), dst);
    }
    fclose(src);
    fclose(dst);
}
// Loads three 1920x1080 24-bit BMPs, blends and blurs them on the GPU, and
// writes the result back out as input/result.bmp (reusing a.bmp's header).
int main(void) {
    srand(time(NULL));
    printf("start\n");
    int count;
    int err;
    if ((err = cudaGetDeviceCount(&count)) != cudaSuccess) {
        printf("error: %d\n", err);
        exit(1);
    }
    printf("count: %d\n", count);
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("%d maxTexture1D: %d\n", i, prop.maxTexture1D);
        // fix: maxTexture2D/maxTexture3D are int arrays; they were previously
        // passed to %d directly, printing a pointer (undefined behavior)
        printf("%d maxTexture2D: %d x %d\n", i, prop.maxTexture2D[0], prop.maxTexture2D[1]);
        printf("%d maxTexture3D: %d x %d x %d\n", i, prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
        printf("%d name: %s\n", i, prop.name);
    }
    size_t image_size = sizeof(uint8_t) * 3 * 1920 * 1080;
    uint8_t *a_img = (uint8_t *) malloc(image_size);
    uint8_t *b_img = (uint8_t *) malloc(image_size);
    uint8_t *c_img = (uint8_t *) malloc(image_size);
    uint8_t *result_img = (uint8_t *) malloc(image_size);
    uint8_t *d_a, *d_b, *d_c, *d_result;
    cudaMalloc((void **) &d_a, image_size);
    cudaMalloc((void **) &d_b, image_size);
    cudaMalloc((void **) &d_c, image_size);
    cudaMalloc((void **) &d_result, image_size);
    size_t start = 0x7a;   // byte offset of the pixel data in these BMP files
    read_image("input/a.bmp", a_img, start, image_size);
    read_image("input/b.bmp", b_img, start, image_size);
    read_image("input/c.bmp", c_img, start, image_size);
    read_image("input/a.bmp", result_img, start, image_size);
    cudaMemcpy(d_a, a_img, image_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b_img, image_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c_img, image_size, cudaMemcpyHostToDevice);
    printf("STARTED\n");
    clock_t t;
    t = clock();
    // one thread per byte; +1 block covers the remainder
    process_color <<< 1920 * 1080 * 3 / 512 + 1, 512 >>>
        (1920, 1080, d_a, d_b, d_c, d_result);
    cudaDeviceSynchronize();
    t = clock() - t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC;
    printf("ENDED\n");
    printf("time: %lf pic/s\n", 1000.f / time_taken);
    cudaMemcpy(result_img, d_result, image_size,
               cudaMemcpyDeviceToHost);
    copy_header("input/a.bmp", "input/result.bmp", start);
    write_image("input/result.bmp", result_img, start, image_size);
    free(a_img);
    free(b_img);
    free(c_img);
    free(result_img);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_result);
    return 0;
}
|
5,985 | #include "includes.h"
// Grid-stride loop over synapses: each synapse injects
// g * (Vhat - v_post) into its postsynaptic neuron's current accumulator.
// atomicAdd is required because many synapses can share a postsynaptic neuron.
// NOTE(review): idx is a signed int compared against a size_t count — confirm
// total_number_of_synapses stays below INT_MAX.
__global__ void conductance_calculate_postsynaptic_current_injection_kernel(int * d_presynaptic_neuron_indices, int* d_postsynaptic_neuron_indices, float* d_reversal_potentials_Vhat, float* d_neurons_current_injections, size_t total_number_of_synapses, float * d_membrane_potentials_v, float * d_synaptic_conductances_g){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < total_number_of_synapses) {
        float reversal_potential_Vhat = d_reversal_potentials_Vhat[idx];
        int postsynaptic_neuron_index = d_postsynaptic_neuron_indices[idx];
        float membrane_potential_v = d_membrane_potentials_v[postsynaptic_neuron_index];
        float synaptic_conductance_g = d_synaptic_conductances_g[idx];
        float component_for_sum = synaptic_conductance_g * (reversal_potential_Vhat - membrane_potential_v);
        // skip the atomic when the contribution is exactly zero
        if (component_for_sum != 0.0) {
            atomicAdd(&d_neurons_current_injections[postsynaptic_neuron_index], component_for_sum);
        }
        idx += blockDim.x * gridDim.x;
    }
    // fix: removed a trailing __syncthreads() — a barrier as the very last
    // statement of a kernel has no observable effect and only adds overhead
} |
5,986 | # include <stdio.h>
# include <stdint.h>
# include "cuda_runtime.h"
//compile nvcc -arch=sm_35 *.cu -o test
__global__ void global_latency (const unsigned int * __restrict__ my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index);
void parametric_measure_global(int N, int iterations, int stride);
void measure_global();
// Selects device 0, runs the global-memory latency sweep, and resets the
// device on exit.
int main(){
    cudaSetDevice(0);
    measure_global();
    cudaDeviceReset();
    return 0;
}
// Sweeps array sizes from 12*256 to 60*256 elements (stepping by the stride),
// running one latency measurement per size with a fixed 32-byte stride.
void measure_global() {
    int iterations = 1;
    int stride = 32/sizeof(unsigned int); //stride, in element
    for (int N = 12*256; N <= 60*256; N += stride) {
        printf("\n=====%10.4f KB array, warm TLB, record 1024 element====\n", sizeof(unsigned int)*(float)N/1024);
        printf("Stride = %d element, %d byte\n", stride, stride * sizeof(unsigned int));
        parametric_measure_global(N, iterations, stride);
        printf("===============================================\n\n");
    }
}
// Measures global-memory load latency with a single-thread pointer chase over
// an N-element ring (h_a[i] = (i+stride) % N), recording 1024 timed accesses;
// results are printed as (index, cycles) pairs.
void parametric_measure_global(int N, int iterations, int stride) {
    cudaDeviceReset();
    cudaError_t error_id;
    int i;
    unsigned int * h_a;
    /* allocate arrays on CPU */
    h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N));
    unsigned int * d_a;
    /* allocate arrays on GPU */
    error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N));
    if (error_id != cudaSuccess) {
        printf("Error 1.0 is %s\n", cudaGetErrorString(error_id));
    }
    /* initialize array elements on CPU: each element holds the index of the
       next element in the chase ring */
    for (i = 0; i < N; i++) {
        h_a[i] = (i+stride)%N;
    }
    /* copy array elements from CPU to GPU */
    error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
    if (error_id != cudaSuccess) {
        printf("Error 1.1 is %s\n", cudaGetErrorString(error_id));
    }
    unsigned int *h_index = (unsigned int *)malloc(sizeof(unsigned int)*1024);
    unsigned int *h_timeinfo = (unsigned int *)malloc(sizeof(unsigned int)*1024);
    unsigned int *duration;
    error_id = cudaMalloc ((void **) &duration, sizeof(unsigned int)*1024);
    if (error_id != cudaSuccess) {
        printf("Error 1.2 is %s\n", cudaGetErrorString(error_id));
    }
    unsigned int *d_index;
    error_id = cudaMalloc( (void **) &d_index, sizeof(unsigned int)*1024 );
    if (error_id != cudaSuccess) {
        printf("Error 1.3 is %s\n", cudaGetErrorString(error_id));
    }
    /* fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
       the supported equivalent */
    cudaDeviceSynchronize ();
    /* launch kernel: one block of one thread (serial pointer chase) */
    dim3 Db = dim3(1);
    dim3 Dg = dim3(1,1,1);
    global_latency <<<Dg, Db>>>(d_a, N, iterations, duration, d_index);
    cudaDeviceSynchronize ();
    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error kernel is %s\n", cudaGetErrorString(error_id));
    }
    /* copy results from GPU to CPU */
    cudaDeviceSynchronize ();
    error_id = cudaMemcpy((void *)h_timeinfo, (void *)duration, sizeof(unsigned int)*1024, cudaMemcpyDeviceToHost);
    if (error_id != cudaSuccess) {
        printf("Error 2.0 is %s\n", cudaGetErrorString(error_id));
    }
    error_id = cudaMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned int)*1024, cudaMemcpyDeviceToHost);
    if (error_id != cudaSuccess) {
        printf("Error 2.1 is %s\n", cudaGetErrorString(error_id));
    }
    cudaDeviceSynchronize ();
    for(i=0;i<1024;i++)
        printf("%d\t %d\n", h_index[i], h_timeinfo[i]);
    /* free memory on GPU */
    cudaFree(d_a);
    cudaFree(d_index);
    cudaFree(duration);
    /*free memory on CPU */
    free(h_a);
    free(h_index);
    free(h_timeinfo);
    cudaDeviceReset();
}
/* Pointer-chase latency kernel, intended to run as a single thread
   (launched <<<1,1>>> by parametric_measure_global). my_array encodes a
   linked chain: my_array[j] is the index of the next element. The first,
   untimed round warms the caches/TLB; the second round times each dependent
   load with clock() and stages results in shared memory so the timing loop
   itself performs no global stores.
   NOTE(review): s_index/s_tvalue hold 1024 entries but the timed loop writes
   iterations*1024 of them - assumes iterations == 1 (measure_global passes
   1); verify before reusing with larger iteration counts. */
__global__ void global_latency (const unsigned int * __restrict__ my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index) {
unsigned int start_time, end_time;
unsigned int j = 0;
/* per-access results staged in shared memory */
__shared__ unsigned int s_tvalue[1024];
__shared__ unsigned int s_index[1024];
int k;
for(k=0; k<1024; k++){
s_index[k] = 0;
s_tvalue[k] = 0;
}
//first round: untimed chase to warm TLB/caches
for (k = 0; k < 16*iterations*1024; k++)
j = __ldg(&my_array[j]);
//second round: time each dependent load
for (k = 0; k < iterations*1024; k++) {
start_time = clock();
j = __ldg(&my_array[j]);
s_index[k]= j; /* storing j keeps the load on the dependency chain */
end_time = clock();
s_tvalue[k] = end_time-start_time;
}
/* publish the staged results to global memory */
for(k=0; k<1024; k++){
index[k]= s_index[k];
duration[k] = s_tvalue[k];
}
}
|
5,987 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iomanip>
#include <cmath>
#include <stdio.h>
using namespace std;
const double eps = 1e-12; // staa przyblienia zera
// GPU row operation for Gaussian elimination: adds m times the pivot row
// (ai) to the target row (bj), one matrix column per thread.
__global__ void addAndMulGauss(double *bj, double *ai, double m)
{
    const int col = threadIdx.x;
    bj[col] = bj[col] + m * ai[col];
}
// Funkcja realizuje algorytm eliminacji Gaussa
//---------------------------------------------
// Gaussian elimination with the row update offloaded to the GPU.
// AB is an n x (n+1) augmented matrix (column n holds the RHS); the solution
// is written to X. Returns false on a (near-)zero pivot.
// Fixes over the original:
//  - the kernel takes the multiplier by value, so the bogus cudaMalloc on a
//    plain double (and the invalid HostToDevice memcpy into a host address)
//    are removed - m is simply passed as a launch argument;
//  - rows are n+1 doubles wide, but only `size` (= n at the call site)
//    elements were copied/updated, so the RHS column was never eliminated
//    and the computed X was wrong; the full width n+1 is used internally
//    (`size` is kept for interface compatibility);
//  - device buffers are now freed on every exit path.
// Requires n+1 <= 1024 (single-block kernel launch).
bool gaussWithCuda(int n, double ** AB, double * X,unsigned int size)
{
    int i, j;
    double m, s;
    const int width = n + 1;      // full augmented-row length, including RHS
    double *dev_ai = 0;
    double *dev_bj = 0;
    cudaError_t cudaStatus;
    (void)size;                   // superseded by `width`, see header comment

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    // Device buffers for one pivot row and one target row.
    cudaStatus = cudaMalloc((void**)&dev_ai, width * sizeof(double));
    cudaStatus = cudaMalloc((void**)&dev_bj, width * sizeof(double));

    // Forward elimination of the coefficients below each pivot.
    for(i = 0; i < n - 1; i++)
    {
        for(j = i + 1; j < n; j++)
        {
            if(fabs(AB[i][i]) < eps) {
                cudaFree(dev_ai);
                cudaFree(dev_bj);
                return false;
            }
            m = -AB[j][i] / AB[i][i];
            cudaStatus = cudaMemcpy(dev_ai, AB[i], width * sizeof(double), cudaMemcpyHostToDevice);
            cudaStatus = cudaMemcpy(dev_bj, AB[j], width * sizeof(double), cudaMemcpyHostToDevice);
            // One thread per column, multiplier passed by value.
            addAndMulGauss<<<1, width>>>(dev_bj, dev_ai, m);
            cudaStatus = cudaDeviceSynchronize();
            // Copy the updated row back to host memory.
            cudaStatus = cudaMemcpy(AB[j], dev_bj, width * sizeof(double), cudaMemcpyDeviceToHost);
        }
    }
    cudaFree(dev_ai);
    cudaFree(dev_bj);

    // Back substitution, bottom row upwards.
    for(i = n - 1; i >= 0; i--)
    {
        s = AB[i][n];
        for(j = n - 1; j >= i + 1; j--)
            s -= AB[i][j] * X[j];
        if(fabs(AB[i][i]) < eps) return false;
        X[i] = s / AB[i][i];
    }
    return true;
}
// Funkcja realizuje algorytm eliminacji Gaussa
//---------------------------------------------
// Plain CPU Gaussian elimination on an n x (n+1) augmented matrix AB
// (column n holds the right-hand side). The solution is written into X.
// Returns false if a pivot smaller than eps is encountered.
bool gauss(int n, double ** AB, double * X)
{
    // Forward elimination: zero out the entries below each pivot.
    for (int row = 0; row < n - 1; row++)
    {
        for (int target = row + 1; target < n; target++)
        {
            if (fabs(AB[row][row]) < eps) return false;
            const double factor = -AB[target][row] / AB[row][row];
            for (int col = row + 1; col <= n; col++)
                AB[target][col] += factor * AB[row][col];
        }
    }
    // Back substitution, bottom row upwards.
    for (int row = n - 1; row >= 0; row--)
    {
        double acc = AB[row][n];
        for (int col = n - 1; col >= row + 1; col--)
            acc -= AB[row][col] * X[col];
        if (fabs(AB[row][row]) < eps) return false;
        X[row] = acc / AB[row][row];
    }
    return true;
}
// Program gwny
//---------------
// Entry point: benchmarks plain CPU Gaussian elimination against the
// CUDA-assisted variant on an n x (n+1) random augmented matrix.
// NOTE(review): clock() returns processor ticks, not milliseconds - the
// reported "ms" values are only comparable to each other, not absolute.
int main()
{
clock_t start, koniec;
double **AB, *X;
int n,i,j;
cout << setprecision(4) << fixed;
// read the number of unknowns
cin >> n;
// allocate the AB (augmented) and X (solution) matrices
AB = new double * [n];
X = new double [n];
for(i = 0; i < n; i++) AB[i] = new double[n + 1];
// fill AB with random data (the original manual input is kept commented)
for(i = 0; i < n; i++)
for(j = 0; j <= n; j++) AB[i][j] = rand()%50+1;//cin >> AB[i][j];
start = clock(); // current processor time
if(gauss(n,AB,X))
{
koniec = clock(); // current processor time
long delta=(long)(koniec - start);//elapsed time
cout <<"czas wykonania: "<< delta << " ms\n";
/*for(i = 0; i < n; i++)
cout << "x" << i + 1 << " = " << setw(9) << X[i]
<< endl; */
}
else
cout << "DZIELNIK ZERO\n";
// refill AB before the GPU run (gauss destroyed the previous contents)
for(i = 0; i < n; i++)
for(j = 0; j <= n; j++) AB[i][j] = rand()%50+1;//cin >> AB[i][j];
start = clock(); // current processor time
if(gaussWithCuda(n,AB,X,n))
{
koniec = clock(); // current processor time
long delta=(long)(koniec - start);//elapsed time
cout <<"czas wykonania: "<< delta << " ms\n";
/*for(i = 0; i < n; i++)
cout << "x" << i + 1 << " = " << setw(9) << X[i]
<< endl; */
}
else
cout << "DZIELNIK ZERO\n";
// release the matrices
for(i = 0; i < n; i++) delete [] AB[i];
delete [] AB;
delete [] X;
getchar();
getchar();
return 0;
}
5,988 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/********************************************************************************
This CUDA program demonstrates how to crack an encrypted password using a simple
"brute force" algorithm. In this program. In this program a password consisting
of two uppercase letters and two digit integers are cracked.
Compile with:
nvcc task3_partA_1.cu -o task3_partA_1
To run:
./task3_partA_1
If you want to analyse the results then use the redirection operator to send
output to a file that you can be viewed using an editor
./task3_partA_1 > task3_partA_1_results.txt
Author: Sasmita Gurung
University Email: S.Gurung12@wlv.ac.uk
**********************************************************************************/
// Compares `check` against four hard-coded plaintext passwords and prints a
// message for every hit. Quirk preserved from the original: the function
// returns 1 only when the FOURTH candidate matches - matches on the first
// three just print and fall through to return 0.
__device__ int match(char *check) {
    char plainPassword_1[] = "BF9999";
    char plainPassword_2[] = "CN9898";
    char plainPassword_3[] = "BT9893";
    char plainPassword_4[] = "MA5369";
    char *candidates[4] = { plainPassword_1, plainPassword_2,
                            plainPassword_3, plainPassword_4 };
    for (int idx = 0; idx < 4; ++idx) {
        const char *lhs = check;
        const char *rhs = candidates[idx];
        // strcmp-style walk: advance while the characters agree.
        while (*lhs == *rhs) {
            if (*lhs == '\0') {
                printf("(Found) Password cracked is: %s\n", candidates[idx]);
                if (idx == 3) return 1;  // only the last password signals success
                break;
            }
            ++lhs;
            ++rhs;
        }
    }
    return 0;
}
// One thread per two-letter prefix: blockIdx.x selects the first letter and
// threadIdx.x the second ('A' + index, launched 26x26). Each thread then
// brute-forces all 10^4 digit suffixes and reports every attempt via match().
__global__ void kernel() {
    char password[7];
    password[6] = '\0';
    password[0] = (char)(blockIdx.x + 65);   // 'A' + block id
    password[1] = (char)(threadIdx.x + 65);  // 'A' + thread id
    for (char d0 = '0'; d0 <= '9'; d0++) {
        for (char d1 = '0'; d1 <= '9'; d1++) {
            for (char d2 = '0'; d2 <= '9'; d2++) {
                for (char d3 = '0'; d3 <= '9'; d3++) {
                    password[2] = d0;
                    password[3] = d1;
                    password[4] = d2;
                    password[5] = d3;
                    if (match(password)) {
                        printf("password found: %s\n", password);
                    }
                    else {
                        printf("(Processing) Brute Force Tried: %s\n", password);
                    }
                }
            }
        }
    }
}
// Stores finish - start in nanoseconds into *difference.
// Returns 0 when the interval is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
    long long int secs  = finish->tv_sec  - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    if (nanos < 0) {   // borrow one second
        secs--;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return (*difference > 0) ? 0 : 1;
}
// Launches the brute-force kernel over a 26x26 letter grid and reports the
// wall-clock time of the complete run.
int main() {
    struct timespec begin, end;
    long long int elapsed_ns;
    clock_gettime(CLOCK_MONOTONIC, &begin);
    kernel <<<26,26>>>();
    cudaDeviceSynchronize();   // wait for all device-side printf to finish
    clock_gettime(CLOCK_MONOTONIC, &end);
    time_difference(&begin, &end, &elapsed_ns);
    printf("Time elapsed was %lldns or %0.9lfs\n", elapsed_ns, (elapsed_ns/1.0e9));
    return 0;
}
|
5,989 | #include <stdio.h>
#include <cuda.h>
// Prints one greeting per launched GPU thread.
// Fix: the original format string ended "...%d\n!", which printed the '!'
// at the start of the NEXT line; the exclamation mark belongs before the
// newline.
__global__ void helloKernel() {
    printf("Hello from thread %d of block %d!\n", threadIdx.x, blockIdx.x);
}
// Host entry point: greet from the CPU, launch 2 blocks x 4 threads on the
// GPU, then surface any launch/execution error.
int main() {
    printf("Hello from the CPU\n");
    helloKernel <<<2, 4>>> ();
    cudaDeviceSynchronize();
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        printf("Error: %s\n", cudaGetErrorString(status));
        exit(-1);
    }
    return 0;
}
|
5,990 | //#define REARRANGED_DOMAIN
// Shifts every 2-D point from local to absolute coordinates by adding the
// lower-left corner offsets. Storage layout depends on REARRANGED_DOMAIN:
//   - default: interleaved pairs  points[2k] = x, points[2k+1] = y
//   - defined: planar layout      points[k]  = x, points[k+N]  = y
__global__ void get_absolute(
    int N,
    double xllcorner,
    double yllcorner,
    double * points)
{
    // Flat global thread index over a 2-D grid of 2-D blocks.
    const int k =
        threadIdx.x + threadIdx.y*blockDim.x +
        (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= N)
        return;
#ifndef REARRANGED_DOMAIN
    const int base = 2*k;
    points[base]     += xllcorner;
    points[base + 1] += yllcorner;
#else
    points[k]     += xllcorner;
    points[k + N] += yllcorner;
#endif
}
|
5,991 | #include "includes.h"
// Writes the natural log of (clamped) input values into env.
// input is read at even indices (j = 2*i); values <= 0 are clamped to 1e-20
// so the logarithm stays finite.
// Fix: use logf and float literals - the original's log()/1e-20 silently
// promoted every operation to double precision inside a float kernel.
__global__ void takeLog(float* input, float* env, int nhalf) {
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    int j = i<<1;
    if (i < nhalf) {
        env[i] = logf(input[j] > 0.0f ? input[j] : 1e-20f); // take the log of the amplitudes
    }
}
5,992 | #include "includes.h"
// Block-level sum reduction ("reduction #6" pattern) that simultaneously
// counts strictly-positive inputs. Each block writes one partial sum to
// g_odata[blockIdx.x] and one positive-element count to g_omask[blockIdx.x].
// Launch requirements: blockDim.x a power of two (the unrolled steps handle
// up to 512) and dynamic shared memory of 2 * blockDim.x * sizeof(float).
// Fix over the original: the final (tid < 32) phase relied on implicit
// warp-synchronous execution, which no longer holds under independent thread
// scheduling (Volta+); __syncwarp() is required between dependent steps.
__global__ void reduce6(const float* g_idata, float* g_odata, float* g_omask, unsigned int n) {
    extern __shared__ float sharedData[];
    float* sdata = &sharedData[0];           // partial sums
    float* smask = &sharedData[blockDim.x];  // positive-element counts
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    unsigned int gridSize = blockDim.x * 2 * gridDim.x;
    sdata[tid] = 0;
    smask[tid] = 0;
    // we reduce multiple elements per thread. The number is determined by the
    // number of active thread blocks (via gridSize). More blocks will result
    // in a larger gridSize and therefore fewer elements per thread
    while (i < n) {
        sdata[tid] += g_idata[i];
        smask[tid] += (g_idata[i] > 0 ? 1 : 0);
        if (i + blockDim.x < n) {
            sdata[tid] += g_idata[i + blockDim.x];
            smask[tid] += (g_idata[i + blockDim.x] > 0 ? 1 : 0);
        }
        i += gridSize;
    }
    __syncthreads();
    // do reduction in shared mem (unrolled tree reduction)
    if (blockDim.x >= 512) {
        if (tid < 256) {
            sdata[tid] += sdata[tid + 256];
            smask[tid] += smask[tid + 256];
        }
        __syncthreads();
    }
    if (blockDim.x >= 256) {
        if (tid < 128) {
            sdata[tid] += sdata[tid + 128];
            smask[tid] += smask[tid + 128];
        }
        __syncthreads();
    }
    if (blockDim.x >= 128) {
        if (tid < 64) {
            sdata[tid] += sdata[tid + 64];
            smask[tid] += smask[tid + 64];
        }
        __syncthreads();
    }
    // Final warp: __syncwarp() makes each step's shared-memory writes visible
    // before the next step reads them.
    if (tid < 32) {
        if (blockDim.x >= 64) {
            sdata[tid] += sdata[tid + 32];
            smask[tid] += smask[tid + 32];
        }
        __syncwarp();
        if (blockDim.x >= 32) {
            sdata[tid] += sdata[tid + 16];
            smask[tid] += smask[tid + 16];
        }
        __syncwarp();
        if (blockDim.x >= 16) {
            sdata[tid] += sdata[tid + 8];
            smask[tid] += smask[tid + 8];
        }
        __syncwarp();
        if (blockDim.x >= 8) {
            sdata[tid] += sdata[tid + 4];
            smask[tid] += smask[tid + 4];
        }
        __syncwarp();
        if (blockDim.x >= 4) {
            sdata[tid] += sdata[tid + 2];
            smask[tid] += smask[tid + 2];
        }
        __syncwarp();
        if (blockDim.x >= 2) {
            sdata[tid] += sdata[tid + 1];
            smask[tid] += smask[tid + 1];
        }
    }
    // write result for this block to global mem
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
        g_omask[blockIdx.x] = smask[0];
    }
}
5,993 | #include "cuda.h"
#include <stdio.h>
#define imin(a,b) (a<b?a:b)
// const int N = 33 * 1024;
const int N = 100;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );
// Block-level sum reduction: each block reduces its own slice of `in` into
// one partial sum written to out[blockIdx.x]. Assumes blockDim.x is a power
// of two (required by the halving loop).
// Fix: the original loaded in[cacheIndex] (the local thread index), so every
// block re-read the first blockDim.x elements instead of its own slice,
// producing a wrong total for any grid with more than one block.
__global__ void reduction( float *in, float *out, int n ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    cache[cacheIndex] = (tid < n) ? in[tid] : 0;  // was in[cacheIndex]
    __syncthreads();
    // Tree reduction in shared memory.
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0)
        out[blockIdx.x] = cache[0];
}
// Element-wise in-place vector add: a[i] += b[i] for the first N elements.
__global__ void add( float *a, float *b) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N)
        a[idx] += b[idx];
}
// Entry point: reads N values from "entrada.txt" (once as float, once as
// double), sums them element-wise with a ones vector on the CPU in both
// precisions, then repeats the float computation on the GPU (add kernel +
// block reduction) and prints the sums and means.
// NOTE(review): the "com double CUDA" labels below are misleading - the GPU
// path computes in float; only the final host-side accumulation uses a
// double. Also: a failed fopen is reported but not fatal (fscanf would then
// read through a NULL FILE*), and d, d_old, f_old and dev_f_old are never
// freed.
int main (void){
FILE *aqv = NULL;
aqv = fopen("entrada.txt", "r");
if(aqv == NULL)
printf("\nErro ao abrir aqv\n");
double *d = (double *) calloc(N, sizeof(double));
double *d_old = (double *) calloc(N, sizeof(double));
float *f = (float *) calloc(N, sizeof(float));
float *f_old = (float *) calloc(N, sizeof(float));
/*
 * read the float input vector; f_old is a vector of ones
 */
int i;
for(i = 0; i < N; i++){
fscanf (aqv, "%f", &f[i]);
f_old[i] = 1.0;
}
/*
 * serial float sum of f + f_old
 */
float soma = 0.0;
float temp = 0.0;
for(i = 0; i < N; i++){
temp = f[i] + f_old[i];
soma += temp;
}
/*
 * rewind the file pointer to the beginning so the same
 * values can be re-read as doubles
 */
fseek ( aqv, 0, SEEK_SET);
/*
 * read the double input vector; d_old is a vector of ones
 */
for(i = 0; i < N; i++){
fscanf (aqv, "%lf", &d[i]);
d_old[i] = 1.0;
}
/*
 * serial double sum of d + d_old
 */
double somad = 0.0;
double tempd = 0.0;
for(i = 0; i < N; i++){
tempd = d[i] + d_old[i];
somad += tempd;
}
printf("\ncom double SERIAL - Valor da soma = %f\n", somad);
printf("\ncom double SERIAL - Valor da media = %f\n", somad/N);
printf("\ncom float SERIAL - Valor da soma = %f\n", soma);
printf("\ncom float SERIAL - Valor da media = %f\n\n\n", soma/N);
// GPU path: element-wise add, then block-level reduction of dev_f
float *partial_c = NULL;
float *dev_f = NULL, *dev_f_old = NULL, *dev_partial_c = NULL;
partial_c = (float*) calloc( blocksPerGrid, sizeof(float) );
cudaMalloc( (void**)&dev_f, N * sizeof(float) );
cudaMalloc( (void**)&dev_f_old, N * sizeof(float) );
cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) );
cudaMemcpy( dev_f, f, N*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dev_f_old, f_old, N*sizeof(float), cudaMemcpyHostToDevice );
add<<<blocksPerGrid,threadsPerBlock>>>( dev_f, dev_f_old );
cudaDeviceSynchronize();
reduction<<<blocksPerGrid,threadsPerBlock>>>( dev_f, dev_partial_c, 100 );
cudaMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(float),
cudaMemcpyDeviceToHost );
// accumulate the per-block partial sums on the host
soma = 0.0;
double teste = 0.0;
for ( i=0; i<blocksPerGrid; i++) {
soma += partial_c[i];
teste += partial_c[i];
}
printf("\n com double CUDA - Valor da soma = %f\n", teste);
printf("\n com double CUDA - Valor da media = %f\n", teste/N);
printf("\n CUDA - Valor da soma = %f\n", soma);
printf("\n CUDA - Valor da media = %f\n", soma/N);
cudaFree( dev_f );
cudaFree( dev_partial_c );
free(f);
free(partial_c);
return 0;
}
5,994 | #include <thrust/complex.h>
using namespace thrust;
extern "C"
{
// Evaluates a truncated log-kernel series at the point (x[i], y[i]) for the
// interval [a, b], weighting term nu' by u[nu'], and writes the result to
// ret[i]. One thread per evaluation point; the caller must size the grid so
// every thread index is a valid point (no bounds parameter is available -
// TODO confirm at the call site).
// Fixes over the original:
//  - sizeof(x)/sizeof(x[0]) on a pointer argument evaluates to 1, so the
//    per-thread `new complex<double>[n]` arrays were out-of-bounds for every
//    thread with i > 0, and were never delete[]d (device heap leak). Each
//    thread only ever touched element [i], so plain scalars are used instead.
__global__ void CUDAlogkernel(const double a, const double b, const int nu, const double *u, double *x, double *y, double *ret)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    const double pi = M_PI;
    const double lengthd = abs(b-a);
    const double C = 0.5*lengthd;           // half interval length
    complex<double> z(x[i], y[i]);
    z = (a + b - 2.0*z)/(a - b);            // tocanonical(u,z)
    complex<double> yv;
    // On the real segment [-1,1] take the upper branch of the inverse
    // Joukowsky map; elsewhere the standard branch.
    if (z.real() <= 1.0 && z.real() >= -1.0 && abs(z.imag()) <= 2.0e-14) {
        yv = z + complex<double>(0.0,1.0)*sqrt(1.0-z)*sqrt(z+1.0);
    }
    else {
        yv = z - sqrt(z-1.0)*sqrt(z+1.0);   // updownjoukowskyinverse(true,z)
    }
    complex<double> yk   = yv;              // running power y^k
    complex<double> ykp1 = yk*yk;           // running power y^(k+1)
    if ( nu >= 0 ) {
        ret[i] = -u[0]*log(abs(2.0*yk/C));  // -logabs(2y/C)
        if ( nu >= 1 ) {
            ret[i] += -u[1]*yk.real();      // -real(yk)
            if ( nu >= 2 ) {
                ret[i] += u[2]*(log(abs(2.0*yk/C))-0.5*ykp1.real()); // -ret[1]-.5real(ykp1)
                if ( nu >= 3) {
                    for (int nun = 3; nun<nu; nun++) {
                        ykp1 *= yv;
                        ret[i] += u[nun]*( yk.real()/(nun-2.0)-ykp1.real()/(nun-0.0) ); // real(yk)/(n-3)-real(ykp1)/(n-1)
                        yk *= yv;
                    }
                }
            }
        }
    }
    ret[i] *= pi*C;
}
} // extern "C"
|
5,995 | #include <ctime>
#include <cuda.h>
#include <iomanip>
#include <iostream>
using namespace std;
#define MASK_WIDTH 5
#define WIDTH 7
// Secuencial
// Sequential reference implementation: 1-D convolution of v (length WIDTH)
// with mask (length MASK_WIDTH), zero-padded at the borders; the output is
// written into result.
void convolution_1D(double *v, double *mask, double *result) {
    const int halo = MASK_WIDTH / 2;
    for (int out = 0; out < WIDTH; out++) {
        double acc = 0;
        for (int m = 0; m < MASK_WIDTH; m++) {
            const int src = out - halo + m;
            if (src >= 0 && src < WIDTH)
                acc += v[src] * mask[m];  // out-of-range taps contribute zero
        }
        result[out] = acc;
    }
}
// GPU 1-D convolution with zero border padding, one output element per
// thread.
// Fix: the grid is rounded up to a multiple of the block size (ceil in main),
// so tail threads have idx >= WIDTH; the original stored result[idx] for them
// unconditionally - an out-of-bounds global write.
__global__
void convolution_1D_kernel(double *v, double *mask, double *result) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= WIDTH) return;  // guard the rounded-up tail threads
    double Pvalue = 0;
    int N_start_point = idx - (MASK_WIDTH / 2);
    for (int j = 0; j < MASK_WIDTH; j++) {
        if (N_start_point + j >= 0 && N_start_point + j < WIDTH) {
            Pvalue += v[N_start_point + j] * mask[j];
        }
    }
    result[idx] = Pvalue;
}
// Prints v as "[a, b, ...]" (WIDTH elements) with zero-decimal precision.
void printVector(double *v) {
    cout << "[";
    for (int idx = 0; idx < WIDTH; idx++) {
        if (idx > 0) cout << ", ";
        cout << setprecision(0) << v[idx];
    }
    cout << "]" << endl;
}
// Fills v with the sequence 1, 2, ..., WIDTH.
void fillVector(double *v) {
    for (int idx = 0; idx < WIDTH; idx++)
        v[idx] = idx + 1;
}
// Benchmarks the sequential vs. CUDA 1-D convolution on a small vector and
// prints both results.
// Fix: the arrays were allocated with new[] but released with scalar delete,
// which is undefined behavior; delete[] is required.
int main() {
    // Host data.
    double h_mask[] = {3, 4, 5, 4, 3};
    double *h_v = new double[WIDTH];
    double *h_result = new double[WIDTH];
    double *ans = new double[WIDTH];
    cout << fixed;
    fillVector(h_v);
    // CPU run.
    {
        clock_t start = clock();
        convolution_1D(h_v, h_mask, h_result);
        printVector(h_v);
        printVector(h_result);
        clock_t end = clock();
        double time_used = double(end - start) / CLOCKS_PER_SEC;
        cout << "Tiempo invertido CPU = " << setprecision(10) << time_used << "s" << endl << endl;
    }
    // Device buffers and launch geometry (grid rounded up to cover WIDTH).
    double *d_mask, *d_v, *d_result;
    int blockSize = 4;
    dim3 dimBlock(blockSize, 1, 1);
    dim3 dimGrid(ceil(WIDTH / float(blockSize)), 1, 1);
    cudaMalloc(&d_mask, sizeof(double) * MASK_WIDTH);
    cudaMalloc(&d_v, sizeof(double) * WIDTH);
    cudaMalloc(&d_result, sizeof(double) * WIDTH);
    // GPU run (the blocking cudaMemcpy back also synchronizes the kernel).
    {
        clock_t start = clock();
        cudaMemcpy(d_v, h_v, sizeof(double) * WIDTH, cudaMemcpyHostToDevice);
        cudaMemcpy(d_mask, h_mask, sizeof(double) * MASK_WIDTH, cudaMemcpyHostToDevice);
        convolution_1D_kernel<<< dimGrid, dimBlock >>>(d_v, d_mask, d_result);
        cudaMemcpy(ans, d_result, sizeof(double) * WIDTH, cudaMemcpyDeviceToHost);
        printVector(h_v);
        printVector(ans);
        clock_t end = clock();
        double time_used = double(end - start) / CLOCKS_PER_SEC;
        cout << "Tiempo invertido GPU = " << setprecision(10) << time_used << "s" << endl;
    }
    delete[] h_v;       // was: delete h_v (UB on new[] allocations)
    delete[] h_result;
    delete[] ans;
    cudaFree(d_mask);
    cudaFree(d_v);
    cudaFree(d_result);
    return 0;
}
|
5,996 | #include "includes.h"
// In-place column-wise prefix sums over `channels` planes, each stored as an
// (h+1) x (w+1) array with a zero first row and first column (integral-image
// layout). One thread handles one column; the running sum is kept in double
// to limit float round-off over tall columns.
__global__ void accumulateColsInplaceKernel(float *input, int channels, int h, int w) {
    // Global column index over all `channels * w` data columns.
    int col = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    if (col >= channels * w) return;
    input += (col / w) * (h+1) * (w+1);  // jump to this column's channel
    col = col % w + 1;                   // local index, 1-based (output column 0 stays zero)
    input[col] = 0;                      // top cell of every column is zero
    double running = 0;
    for (int row = 1; row <= h; ++row) {
        float *cell = &input[row * (w+1) + col];
        running += static_cast<double>(*cell);
        *cell = static_cast<float>(running);
    }
}
5,997 | #include <stdio.h>
#include <stdlib.h>
// Minimal device-side printf demo: every launched thread prints the same line.
__global__ void print_from_device(void){
printf("Hello World! from device\n");
}
// Device-side printf demo that also reports each thread's block and thread id.
__global__ void print_from_device_w_id(void){
printf("Hello World! from device (block : %d, threads : %d)\n",blockIdx.x,threadIdx.x);
}
// Host entry point: prints a greeting, then launches the ID-printing kernel.
// Fix: the original launched <<<threads,blocks>>>, silently swapping the
// configuration (4 blocks of 2 threads instead of the intended 2 blocks of
// 4 threads) - the grid dimension comes first in the launch syntax.
int main(void){
    printf("Hello World From host!\n");
    int threads = 4;
    int blocks = 2;
    print_from_device_w_id<<<blocks,threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
5,998 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
const static int N = 11;
// kernel funtion
// Computes Pascal's triangle on the GPU, one thread per column, printing each
// of the first rowNmb rows as it is produced. Thread i carries its current
// entry in `tmp` across loop iterations: for each new row, entry i is the sum
// of entries i and i-1 of the previous row (binomial recurrence).
// Launch requirement: a single block with at least rowNmb threads, since
// __syncthreads() only synchronizes within one block (main launches <<<1,N>>>).
__global__
void calcColumn(int* row, const int rowNmb) {
//global index
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tmp;
// calculate i-th element for increasing rows
for (int countRow = 0; countRow < rowNmb; countRow++) {
if (i == 0) {
// leftmost entry of every row is 1
tmp = 1;
}
else if (i <= countRow) {
// binomial recurrence: tmp still holds this thread's entry from the
// previous row; row[i-1] is the left neighbour's previous-row entry
tmp = tmp + row[i - 1];
}
else {
// column not yet active in this row
tmp = 0;
}
// wait for other threads to finish before overwriting i-1-th element with the i-th
__syncthreads();
row[i] = tmp;
// wait before all threads have written i-th element
__syncthreads();
// 0-th thread writes current row to output (zeros are trailing inactive columns)
if (i == 0) {
for (int j = 0; j < rowNmb; j++) {
if (row[j] == 0) continue;
printf("%d\t", row[j]);
}
printf("\n");
}
}
}
// recursive factorial function for checking result
// Iterative factorial; any n <= 0 yields 1 (matches the recursive original).
int fac(const int n) {
    int result = 1;
    for (int k = 2; k <= n; ++k)
        result *= k;
    return result;
}
// Computes Pascal's triangle rows on the GPU (one thread per entry) and then
// prints a host-side check of the final row via the factorial formula.
int main() {
    int* deviceRow;
    const int bytes = sizeof(int) * N;
    // Device buffer holding the triangle row being built.
    cudaMalloc((void**)&deviceRow, bytes);
    // Single block, N threads: each thread owns one entry of the row.
    calcColumn<<<1, N>>> (deviceRow, N);
    // Wait for the GPU (and its device-side printf) to finish.
    cudaDeviceSynchronize();
    // Host-side verification row: C(N-1, i) = (N-1)! / (i! (N-1-i)!).
    printf("Control row:\n");
    for (int i = 0; i < N; i++) {
        printf("%d\t", (int)(fac(N-1) / (float)fac(i) / (float)fac(N-1-i)));
    }
    printf("\n");
    // Release device memory.
    cudaFree(deviceRow);
    return 0;
}
|
5,999 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Aborts the process with a diagnostic if the most recent CUDA call failed.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void curvi (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_86_ = 2.0 * mu[i+2][j][k];
_t_86_ += la[i+2][j][k];
double _t_83_ = met1[i+2][j][k] * _t_86_ * met2[i+2][j][k];
double _v_38_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i+2][j][k+2];
double _v_76_ = c2 * u1[i][j+2][k+2];
_v_76_ -= c2 * u1[i][j-2][k+2];
double _v_79_ = c2 * u2[i][j+2][k+2];
_v_79_ -= c2 * u2[i][j-2][k+2];
double _v_82_ = c2 * u1[i][j+2][k-2];
_v_82_ -= c2 * u1[i][j-2][k-2];
double _v_85_ = c2 * u2[i][j+2][k-2];
_v_85_ -= c2 * u2[i][j-2][k-2];
double _v_89_ = c2 * u1[i][j+2][k+1];
_v_89_ -= c2 * u1[i][j-2][k+1];
double _v_92_ = c2 * u2[i][j+2][k+1];
_v_92_ -= c2 * u2[i][j-2][k+1];
double _v_95_ = c2 * u1[i][j+2][k-1];
_v_95_ -= c2 * u1[i][j-2][k-1];
double _v_98_ = c2 * u2[i][j+2][k-1];
_v_98_ -= c2 * u2[i][j-2][k-1];
_v_38_ -= c2 * u1[i+2][j][k-2];
double _v_9_ = c2 * u1[i+2][j][k-2];
double _t_84_ = _v_38_;
double _v_39_ = c1 * u1[i+2][j][k+1];
double _v_77_ = c1 * u1[i][j+1][k+2];
_v_77_ -= c1 * u1[i][j-1][k+2];
double _v_80_ = c1 * u2[i][j+1][k+2];
_v_80_ -= c1 * u2[i][j-1][k+2];
double _v_83_ = c1 * u1[i][j+1][k-2];
_v_83_ -= c1 * u1[i][j-1][k-2];
double _v_86_ = c1 * u2[i][j+1][k-2];
_v_86_ -= c1 * u2[i][j-1][k-2];
double _v_90_ = c1 * u1[i][j+1][k+1];
_v_90_ -= c1 * u1[i][j-1][k+1];
double _v_93_ = c1 * u2[i][j+1][k+1];
_v_93_ -= c1 * u2[i][j-1][k+1];
double _v_96_ = c1 * u1[i][j+1][k-1];
_v_96_ -= c1 * u1[i][j-1][k-1];
double _v_99_ = c1 * u2[i][j+1][k-1];
_v_99_ -= c1 * u2[i][j-1][k-1];
_v_39_ -= c1 * u1[i+2][j][k-1];
_t_84_ += _v_39_;
double _v_40_ = strx[i] * _t_83_ * _t_84_;
double _v_19_ = c2 * u1[i+2][j][k+1];
double _v_28_ = c2 * u1[i+2][j][k-1];
double _v_56_ = c2 * _v_40_;
double _v_41_ = c2 * u2[i+2][j][k+2];
double _v_3_ = c2 * u2[i+2][j][k+2];
_v_41_ -= c2 * u2[i+2][j][k-2];
double _v_12_ = c2 * u2[i+2][j][k-2];
double _t_91_ = _v_41_;
double _v_42_ = c1 * u2[i+2][j][k+1];
_v_42_ -= c1 * u2[i+2][j][k-1];
_t_91_ += _v_42_;
double _t_90_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_43_ = stry[j] * _t_90_ * _t_91_;
double _v_22_ = c2 * u2[i+2][j][k+1];
double _v_31_ = c2 * u2[i+2][j][k-1];
_v_56_ += c2 * _v_43_;
double _t_95_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_44_ = c2 * u3[i+2][j][k+2];
double _v_6_ = c2 * u3[i+2][j][k+2];
_v_44_ -= c2 * u3[i+2][j][k-2];
double _v_15_ = c2 * u3[i+2][j][k-2];
double _t_96_ = _v_44_;
double _v_45_ = c1 * u3[i+2][j][k+1];
_v_45_ -= c1 * u3[i+2][j][k-1];
_t_96_ += _v_45_;
double _v_46_ = _t_95_ * _t_96_;
double _v_25_ = c2 * u3[i+2][j][k+1];
double _v_34_ = c2 * u3[i+2][j][k-1];
_v_56_ += c2 * _v_46_;
double _t_104_ = 2.0 * mu[i-2][j][k];
_t_104_ += la[i-2][j][k];
double _t_101_ = met1[i-2][j][k] * _t_104_ * met2[i-2][j][k];
double _v_47_ = c2 * u1[i-2][j][k+2];
_v_0_ -= c2 * u1[i-2][j][k+2];
_v_47_ -= c2 * u1[i-2][j][k-2];
_v_9_ -= c2 * u1[i-2][j][k-2];
double _t_102_ = _v_47_;
double _v_48_ = c1 * u1[i-2][j][k+1];
_v_48_ -= c1 * u1[i-2][j][k-1];
_t_102_ += _v_48_;
double _v_49_ = strx[i] * _t_101_ * _t_102_;
_v_19_ -= c2 * u1[i-2][j][k+1];
_v_28_ -= c2 * u1[i-2][j][k-1];
_v_56_ += c2 * _v_49_;
double _v_50_ = c2 * u2[i-2][j][k+2];
_v_3_ -= c2 * u2[i-2][j][k+2];
_v_50_ -= c2 * u2[i-2][j][k-2];
_v_12_ -= c2 * u2[i-2][j][k-2];
double _t_109_ = _v_50_;
double _v_51_ = c1 * u2[i-2][j][k+1];
_v_51_ -= c1 * u2[i-2][j][k-1];
_t_109_ += _v_51_;
double _t_108_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_52_ = stry[j] * _t_108_ * _t_109_;
_v_22_ -= c2 * u2[i-2][j][k+1];
_v_31_ -= c2 * u2[i-2][j][k-1];
_v_56_ += c2 * _v_52_;
double _t_113_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_53_ = c2 * u3[i-2][j][k+2];
_v_6_ -= c2 * u3[i-2][j][k+2];
_v_53_ -= c2 * u3[i-2][j][k-2];
_v_15_ -= c2 * u3[i-2][j][k-2];
double _t_114_ = _v_53_;
double _v_54_ = c1 * u3[i-2][j][k+1];
_v_54_ -= c1 * u3[i-2][j][k-1];
_t_114_ += _v_54_;
double _v_55_ = _t_113_ * _t_114_;
_v_25_ -= c2 * u3[i-2][j][k+1];
_v_34_ -= c2 * u3[i-2][j][k-1];
_v_56_ += c2 * _v_55_;
double _t_79_ = stry[j] * _v_56_;
double _t_123_ = 2.0 * mu[i+1][j][k];
_t_123_ += la[i+1][j][k];
double _t_120_ = met1[i+1][j][k] * _t_123_ * met2[i+1][j][k];
double _v_57_ = c2 * u1[i+1][j][k+2];
_v_57_ -= c2 * u1[i+1][j][k-2];
double _t_121_ = _v_57_;
double _v_58_ = c1 * u1[i+1][j][k+1];
double _v_20_ = c1 * u1[i+1][j][k+1];
_v_58_ -= c1 * u1[i+1][j][k-1];
double _v_29_ = c1 * u1[i+1][j][k-1];
_t_121_ += _v_58_;
double _v_59_ = strx[i] * _t_120_ * _t_121_;
double _v_1_ = c1 * u1[i+1][j][k+2];
double _v_10_ = c1 * u1[i+1][j][k-2];
double _v_75_ = c1 * _v_59_;
double _v_60_ = c2 * u2[i+1][j][k+2];
_v_60_ -= c2 * u2[i+1][j][k-2];
double _t_128_ = _v_60_;
double _v_61_ = c1 * u2[i+1][j][k+1];
double _v_23_ = c1 * u2[i+1][j][k+1];
_v_61_ -= c1 * u2[i+1][j][k-1];
double _v_32_ = c1 * u2[i+1][j][k-1];
_t_128_ += _v_61_;
double _t_127_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_62_ = stry[j] * _t_127_ * _t_128_;
double _v_4_ = c1 * u2[i+1][j][k+2];
double _v_13_ = c1 * u2[i+1][j][k-2];
_v_75_ += c1 * _v_62_;
double _t_132_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_63_ = c2 * u3[i+1][j][k+2];
_v_63_ -= c2 * u3[i+1][j][k-2];
double _t_133_ = _v_63_;
double _v_64_ = c1 * u3[i+1][j][k+1];
double _v_26_ = c1 * u3[i+1][j][k+1];
_v_64_ -= c1 * u3[i+1][j][k-1];
double _v_35_ = c1 * u3[i+1][j][k-1];
_t_133_ += _v_64_;
double _v_65_ = _t_132_ * _t_133_;
double _v_7_ = c1 * u3[i+1][j][k+2];
double _v_16_ = c1 * u3[i+1][j][k-2];
_v_75_ += c1 * _v_65_;
double _t_141_ = 2.0 * mu[i-1][j][k];
_t_141_ += la[i-1][j][k];
double _t_138_ = met1[i-1][j][k] * _t_141_ * met2[i-1][j][k];
double _v_66_ = c2 * u1[i-1][j][k+2];
_v_66_ -= c2 * u1[i-1][j][k-2];
double _t_139_ = _v_66_;
double _v_67_ = c1 * u1[i-1][j][k+1];
_v_20_ -= c1 * u1[i-1][j][k+1];
_v_67_ -= c1 * u1[i-1][j][k-1];
_v_29_ -= c1 * u1[i-1][j][k-1];
_t_139_ += _v_67_;
double _v_68_ = strx[i] * _t_138_ * _t_139_;
_v_1_ -= c1 * u1[i-1][j][k+2];
_v_10_ -= c1 * u1[i-1][j][k-2];
_v_75_ += c1 * _v_68_;
double _v_69_ = c2 * u2[i-1][j][k+2];
_v_69_ -= c2 * u2[i-1][j][k-2];
double _t_146_ = _v_69_;
double _v_70_ = c1 * u2[i-1][j][k+1];
_v_23_ -= c1 * u2[i-1][j][k+1];
_v_70_ -= c1 * u2[i-1][j][k-1];
_v_32_ -= c1 * u2[i-1][j][k-1];
_t_146_ += _v_70_;
double _t_145_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_71_ = stry[j] * _t_145_ * _t_146_;
_v_4_ -= c1 * u2[i-1][j][k+2];
_v_13_ -= c1 * u2[i-1][j][k-2];
_v_75_ += c1 * _v_71_;
double _t_150_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_72_ = c2 * u3[i-1][j][k+2];
_v_72_ -= c2 * u3[i-1][j][k-2];
double _t_151_ = _v_72_;
double _v_73_ = c1 * u3[i-1][j][k+1];
_v_26_ -= c1 * u3[i-1][j][k+1];
_v_73_ -= c1 * u3[i-1][j][k-1];
_v_35_ -= c1 * u3[i-1][j][k-1];
_t_151_ += _v_73_;
double _v_74_ = _t_150_ * _t_151_;
_v_7_ -= c1 * u3[i-1][j][k+2];
_v_16_ -= c1 * u3[i-1][j][k-2];
_v_75_ += c1 * _v_74_;
_t_79_ += stry[j] * _v_75_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_79_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_16_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_8_ = stry[j] * _t_16_ * _t_17_;
double _v_18_ = c2 * _v_8_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_7_ = 2.0 * mu[i][j][k+2];
double _t_10_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_7_ += la[i][j][k+2];
double _t_4_ = met1[i][j][k+2] * _t_7_ * met2[i][j][k+2];
double _t_164_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = stry[j] * _t_3_ * strx[i];
_v_18_ += c2 * _v_2_;
double _t_11_ = _v_3_;
_t_11_ += _v_4_;
double _v_5_ = _t_10_ * _t_11_;
_v_18_ += c2 * _v_5_;
double _t_24_ = _v_9_;
_t_24_ += _v_10_;
double _t_26_ = 2.0 * mu[i][j][k-2];
_t_26_ += la[i][j][k-2];
double _t_23_ = met1[i][j][k-2] * _t_26_ * met2[i][j][k-2];
double _t_176_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_22_ = _t_23_ * _t_24_;
double _v_11_ = stry[j] * _t_22_ * strx[i];
_v_18_ += c2 * _v_11_;
double _t_30_ = _v_12_;
_t_30_ += _v_13_;
double _t_29_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_35_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_14_ = _t_29_ * _t_30_;
_v_18_ += c2 * _v_14_;
double _t_36_ = _v_15_;
_t_36_ += _v_16_;
double _v_17_ = stry[j] * _t_35_ * _t_36_;
_v_18_ += c2 * _v_17_;
double _t_0_ = _v_18_;
double _t_56_ = _v_25_;
_t_56_ += _v_26_;
double _t_55_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_27_ = stry[j] * _t_55_ * _t_56_;
double _v_37_ = c1 * _v_27_;
double _t_44_ = _v_19_;
_t_44_ += _v_20_;
double _t_46_ = 2.0 * mu[i][j][k+1];
double _t_49_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_46_ += la[i][j][k+1];
double _t_43_ = met1[i][j][k+1] * _t_46_ * met2[i][j][k+1];
double _t_189_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_42_ = _t_43_ * _t_44_;
double _v_21_ = stry[j] * _t_42_ * strx[i+2];
_v_37_ += c1 * _v_21_;
double _t_50_ = _v_22_;
_t_50_ += _v_23_;
double _v_24_ = _t_49_ * _t_50_;
_v_37_ += c1 * _v_24_;
double _t_63_ = _v_28_;
_t_63_ += _v_29_;
double _t_65_ = 2.0 * mu[i][j][k-1];
_t_65_ += la[i][j][k-1];
double _t_62_ = met1[i][j][k-1] * _t_65_ * met2[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_61_ = _t_62_ * _t_63_;
double _v_30_ = stry[j] * _t_61_ * strx[i-2];
_v_37_ += c1 * _v_30_;
double _t_69_ = _v_31_;
_t_69_ += _v_32_;
double _t_68_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_74_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_33_ = _t_68_ * _t_69_;
_v_37_ += c1 * _v_33_;
double _t_75_ = _v_34_;
_t_75_ += _v_35_;
double _v_36_ = stry[j] * _t_74_ * _t_75_;
_v_37_ += c1 * _v_36_;
_t_0_ += _v_37_;
r1ic0jc0kc0 += _t_0_;
double _t_159_ = _t_10_;
double _t_160_ = _v_76_;
_t_160_ += _v_77_;
double _t_158_ = _t_159_ * _t_160_;
double _v_78_ = strx[i] * _t_158_ * stry[j+2];
double _v_88_ = c2 * _v_78_;
double _t_165_ = _v_79_;
_t_165_ += _v_80_;
double _v_81_ = _t_164_ * _t_165_;
_v_88_ += c2 * _v_81_;
double _t_171_ = _t_29_;
double _t_172_ = _v_82_;
_t_172_ += _v_83_;
double _t_170_ = _t_171_ * _t_172_;
double _v_84_ = strx[i] * _t_170_ * stry[j];
_v_88_ += c2 * _v_84_;
double _t_177_ = _v_85_;
_t_177_ += _v_86_;
double _v_87_ = _t_176_ * _t_177_;
_v_88_ += c2 * _v_87_;
double _t_155_ = _v_88_;
double _t_184_ = _t_49_;
double _t_185_ = _v_89_;
_t_185_ += _v_90_;
double _t_183_ = _t_184_ * _t_185_;
double _v_91_ = strx[i] * _t_183_ * stry[j-2];
double _v_101_ = c1 * _v_91_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _v_94_ = _t_189_ * _t_190_;
_v_101_ += c1 * _v_94_;
double _t_196_ = _t_68_;
double _t_197_ = _v_95_;
_t_197_ += _v_96_;
double _t_195_ = _t_196_ * _t_197_;
double _v_97_ = strx[i] * _t_195_ * stry[j];
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_155_ += _v_101_;
r1ic0jc0kc0 += _t_155_;
r1[i][j][k] = r1ic0jc0kc0;
r1[i][j][k] += c2*(
mu[i][j+2][k]*met3[i][j+2][k]*met1[i][j+2][k]*(
c2*(u1[i][j+2][k+2]-u1[i][j+2][k-2]) +
c1*(u1[i][j+2][k+1]-u1[i][j+2][k-1]) )*stry[j+1]*strx[i]
+ mu[i][j+2][k]*met2[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i][j+2][k+2]-u2[i][j+2][k-2]) +
c1*(u2[i][j+2][k+1]-u2[i][j+2][k-1]) )
+ ( mu[i][j-2][k]*met3[i][j-2][k]*met1[i][j-2][k]*(
c2*(u1[i][j-2][k+2]-u1[i][j-2][k-2]) +
c1*(u1[i][j-2][k+1]-u1[i][j-2][k-1]) )*stry[j]*strx[i]
+ mu[i][j-2][k]*met2[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i][j-2][k+2]-u2[i][j-2][k-2]) +
c1*(u2[i][j-2][k+1]-u2[i][j-2][k-1]) ) )
) + c1*(
mu[i][j+1][k]*met3[i][j+1][k]*met1[i][j+1][k]*(
c2*(u1[i][j+1][k+2]-u1[i][j+1][k-2]) +
c1*(u1[i][j+1][k+1]-u1[i][j+1][k-1]) )*stry[j-1]*strx[i]
+ mu[i][j+1][k]*met2[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i][j+1][k+2]-u2[i][j+1][k-2]) +
c1*(u2[i][j+1][k+1]-u2[i][j+1][k-1]) )
+ ( mu[i][j-1][k]*met3[i][j-1][k]*met1[i][j-1][k]*(
c2*(u1[i][j-1][k+2]-u1[i][j-1][k-2]) +
c1*(u1[i][j-1][k+1]-u1[i][j-1][k-1]) )*stry[j]*strx[i]
+ mu[i][j-1][k]*met2[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i][j-1][k+2]-u2[i][j-1][k-2]) +
c1*(u2[i][j-1][k+1]-u2[i][j-1][k-1]) ) ) );
r1[i][j][k] +=
c2*( mu[i][j+2][k]*met1[i][j+2][k]*met1[i][j+2][k]*(
c2*(u2[i+2][j+2][k]-u2[i-2][j+2][k]) +
c1*(u2[i+1][j+2][k]-u2[i-1][j+2][k]) )
+ mu[i][j-2][k]*met1[i][j-2][k]*met1[i][j-2][k]*(
c2*(u2[i+2][j-2][k]-u2[i-2][j-2][k])+
c1*(u2[i+1][j-2][k]-u2[i-1][j-2][k]) )
) +
c1*( mu[i][j+1][k]*met1[i][j+1][k]*met1[i][j+1][k]*(
c2*(u2[i+2][j+1][k]-u2[i-2][j+1][k]) +
c1*(u2[i+1][j+1][k]-u2[i-1][j+1][k]) )
+ mu[i][j-1][k]*met1[i][j-1][k]*met1[i][j-1][k]*(
c2*(u2[i+2][j-1][k]-u2[i-2][j-1][k]) +
c1*(u2[i+1][j-1][k]-u2[i-1][j-1][k])))
+
c2*( la[i+2][j][k]*met1[i+2][j][k]*met1[i+2][j][k]*(
c2*(u2[i+2][j+2][k]-u2[i+2][j-2][k]) +
c1*(u2[i+2][j+1][k]-u2[i+2][j-1][k]) )
+ la[i-2][j][k]*met1[i-2][j][k]*met1[i-2][j][k]*(
c2*(u2[i-2][j+2][k]-u2[i-2][j-2][k])+
c1*(u2[i-2][j+1][k]-u2[i-2][j-1][k]) )
) +
c1*( la[i+1][j][k]*met1[i+1][j][k]*met1[i+1][j][k]*(
c2*(u2[i+1][j+2][k]-u2[i+1][j-2][k]) +
c1*(u2[i+1][j+1][k]-u2[i+1][j-1][k]) )
+ la[i-1][j][k]*met1[i-1][j][k]*met1[i-1][j][k]*(
c2*(u2[i-1][j+2][k]-u2[i-1][j-2][k]) +
c1*(u2[i-1][j+1][k]-u2[i-1][j-1][k])));
}
}
}
/* Allocate device buffers for all fields, copy inputs host->device, run the
 * curvi stencil kernel once over an N^3 grid, and copy the r1 result back.
 * Fixes: previously every device buffer was leaked on each call (no cudaFree),
 * and a failed kernel launch went undetected (no check_error after launch). */
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
double *r1;
cudaMalloc (&r1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for r1\n");
cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u1;
cudaMalloc (&u1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u1\n");
cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u2;
cudaMalloc (&u2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u2\n");
cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u3;
cudaMalloc (&u3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u3\n");
cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met1;
cudaMalloc (&met1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met1\n");
cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met2;
cudaMalloc (&met2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met2\n");
cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met3;
cudaMalloc (&met3, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met3\n");
cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *met4;
cudaMalloc (&met4, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for met4\n");
cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
/* strx/stry are 1-D stretching arrays of length N, not N^3 fields. */
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
/* NOTE(review): ceil(a, b) here is presumably a project macro for integer
 * ceil-division (it is called with two arguments) — confirm its definition. */
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
curvi <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
check_error ("curvi kernel launch failed\n");
/* cudaMemcpy is synchronizing, so the result is complete once it returns. */
cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
/* Release all device buffers (previously leaked on every invocation). */
cudaFree (r1);
cudaFree (u1);
cudaFree (u2);
cudaFree (u3);
cudaFree (mu);
cudaFree (la);
cudaFree (met1);
cudaFree (met2);
cudaFree (met3);
cudaFree (met4);
cudaFree (strx);
cudaFree (stry);
}
|
6,000 | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define WARP_SIZE 32
#define MAX_THREADS_X 1024
#define MAX_THREADS_Y 1024
#define MAX_THREADS_Z 64
#define MAX_BLOCKS_X 2147483647
#define MAX_BLOCKS_Y 65535
#define MAX_BLOCKS_Z 65535
#define THREADS_PER_BLOCK 128
//3.0, 16 blocks, 2048 threads
//MIN THREADS_PER_BLOCK = 128
#define SIZE 1024
/* Abort the process with a formatted diagnostic (caller message, source
 * location, CUDA reason string) whenever a CUDA API call did not succeed.
 * Intended to be invoked through the SAFE_CALL macro below. */
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
	if (err == cudaSuccess)
		return;
	fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",
	        msg, file_name, line_number, cudaGetErrorString(err));
	exit(EXIT_FAILURE);
}
#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)
/* Up-sweep (reduce) phase of a work-efficient exclusive scan, one level per
 * launch: each thread adds the partial sum at offset 2^d - 1 into the slot at
 * offset 2^(d+1) - 1 of its 2^(d+1)-wide segment (matches the Blelloch
 * up-sweep recurrence).  Host launches ceil(size / 2^(d+1)) logical threads.
 * `n` is accepted for interface compatibility but not used for bounds — the
 * `size` guard below handles the tail.
 * Fix: exact powers of two were computed with double-precision pow() in
 * device code; replaced with integer shifts (same values, no FP round-trip). */
__global__ void scan(int *input, int n, int d, int size)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = 1 << (d + 1);   /* 2^(d+1) */
	int half   = 1 << d;         /* 2^d     */
	i *= stride;                 /* first element of this thread's segment */
	if (i + stride - 1 < size)
	{
		input[i + stride - 1] = input[i + half - 1] + input[i + stride - 1];
	}
}
/* Down-sweep phase of the work-efficient exclusive scan, one level per
 * launch: each thread swaps its left partial (offset 2^d - 1) into the right
 * slot (offset 2^(d+1) - 1) and stores their sum on the right — the standard
 * Blelloch down-sweep step.  The host zeroes the last element beforehand
 * (see quickfix).  `n` is unused; the `size` guard bounds the tail.
 * Fix: double-precision pow() for exact powers of two replaced with shifts. */
__global__ void down_sweep(int *input, int n, int d, int size)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = 1 << (d + 1);   /* 2^(d+1) */
	int half   = 1 << d;         /* 2^d     */
	i *= stride;
	if (i + stride - 1 < size)
	{
		int left = input[i + half - 1];
		input[i + half - 1]   = input[i + stride - 1];
		input[i + stride - 1] = left + input[i + stride - 1];
	}
}
/* Zero the final element of the array: the exclusive-scan down-sweep starts
 * from a cleared root.  Launched with a single thread (<<<1,1>>>). */
__global__ void quickfix(int *input, int size)
{
	int last = size - 1;
	input[last] = 0;
}
/* Driver: builds input 0..SIZE-1 on the host, runs the up-sweep levels, zeroes
 * the root, runs the down-sweep levels (exclusive Blelloch scan), then copies
 * the result back and prints it.
 * Fixes: host-side pow()/float-ceil replaced with exact integer arithmetic,
 * kernel launches are now error-checked (launch failures were silent), and
 * main returns 0 explicitly. */
int main()
{
	int *input = (int *)malloc(SIZE * sizeof(int));
	for (int i = 0; i < SIZE; i++)
		input[i] = i;
	int *d_input;
	/* Number of tree levels: ceil(log2(SIZE)). */
	int d = ceil(log2((float)SIZE));
	SAFE_CALL(cudaMalloc<int>(&d_input, SIZE*sizeof(int)), "CUDA Malloc Failed");
	SAFE_CALL(cudaMemcpy(d_input, input, SIZE*sizeof(int), cudaMemcpyHostToDevice ), "CUDA Memcpy Host To Device Failed");
	/* Up-sweep: level i performs ceil(SIZE / 2^(i+1)) pair-combines. */
	for (int i = 0; i < d; i++)
	{
		int stride = 1 << (i + 1);                                  /* 2^(i+1) */
		int numop = (SIZE + stride - 1) / stride;                   /* exact ceil-div */
		int bloques = (numop + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
		printf("numop: %d", numop);
		scan<<<bloques,THREADS_PER_BLOCK>>>(d_input, numop, i, SIZE);
		SAFE_CALL(cudaGetLastError(), "scan kernel launch failed");
	}
	/* Clear the root before the down-sweep (exclusive scan). */
	quickfix<<<1,1>>>(d_input, SIZE);
	SAFE_CALL(cudaGetLastError(), "quickfix kernel launch failed");
	/* Down-sweep: operation count doubles each level, from 1 up. */
	int numop2 = 1;
	for (int i = d - 1; i >= 0; i--)
	{
		int bloques = (numop2 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
		down_sweep<<<bloques,THREADS_PER_BLOCK>>>(d_input, numop2, i, SIZE);
		SAFE_CALL(cudaGetLastError(), "down_sweep kernel launch failed");
		numop2 *= 2;
	}
	/* Blocking copy also synchronizes with the preceding kernels. */
	SAFE_CALL(cudaMemcpy(input, d_input, SIZE*sizeof(int), cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
	SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
	for(int i = 0; i < SIZE; i++)
	{
		printf("%d\n", input[i]);
	}
	free(input);
	return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.