serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,101 | #include "includes.h"
// Element-wise addition of two row-major n-by-m matrices: c = a + b.
// Expected launch: x dimension covers columns (blockDim.x * gridDim.x >= n),
// one grid row per matrix row (gridDim.y >= m).
__global__ void matadd_2d(const float *a, const float *b, float *c, int n, int m){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y;
    // Guard against the partial block at the grid tail.
    if (col >= n || row >= m)
        return;
    const int offset = row * n + col;
    c[offset] = a[offset] + b[offset];
}
19,102 | /// stuff happening
// nvcc -o mdCuda mdCuda.cu -g -G -lrt -lm
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#define LINUX 1 // is this on a linux machine??
#define NEAREST 0 // Are we going to use nearest algorithm
#define OPT 0 // N^2 or optimized code??
#define EPS 1
#define SIG 1e-2
#define CUT 2.5
#define RCUT (CUT*SIG)
#define CUT2 CUT*CUT
#define PI 3.14159265
#define DT 0.001 // 0.001 second time increments definitely want to change this
#define N_BODY_NUM 500000
#define XMAX (BOX_SIZE/2.0)
#define XMIN -(BOX_SIZE/2.0)
#define YMAX (BOX_SIZE/2.0)
#define YMIN -(BOX_SIZE/2.0)
#define T0 1
#define MAX_TRIALS 100
#define ITERS 100
#define BOX_SIZE 10.0
#define GRID_NUM ((BOX_SIZE)/(RCUT))
#define BLOCK_LENGTH(GRID_NUM,BOX_SIZE) (BOX_SIZE/GRID_NUM) // size of block that contains GRID_BLOCK_NUM
#define EST_NUM(GRID_NUM,N_BODY_NUM) (N_BODY_NUM/(GRID_NUM*GRID_NUM))
typedef struct sim_param_t {
int npart;
float dt;
float eps_lj;
float sig_lj;
}params;
typedef struct molecule_t {
float* x;
float* v;
float* a;
float* F;
}mols;
__device__ void compute_forces_naive(int n, int k, float* x, float* F);
__device__ void box_reflect(int k, float* x, float* v, float* a);
__device__ void reflect(float wall, float* x, float* v, float* a);
__device__ void verletInt2(int k, float dt, float* x, float* v, float* a);
__device__ float compute_LJ_Scalar(float r2, float eps, float sig2);
__device__ void verletInt1(int k, float dt, float* x, float* v, float* a);
int init_particles(int n, float* x, float* v, params* param);
void init_particles_va(int n, float* v,float* a, params* param);
// Just a prototype and declaration
struct timespec diff(struct timespec start, struct timespec end);
void cudaErrorCheck(cudaError_t err);
// Runs ITERS steps of the van-der-Waals N-body simulation; thread i is
// responsible for accumulating forces on particle i via F[2*i], F[2*i+1].
// NOTE(review): there is no `i < particles` bounds guard, so tail-block
// threads index past the arrays — confirm launch always matches `particles`.
// NOTE(review): every thread calls verletInt1/box_reflect/verletInt2 on
// particle k for ALL k != i, so all threads concurrently read-modify-write
// the same x/v/a entries — this is a data race; verify against the intended
// algorithm (likely only particle i should be integrated per thread).
// NOTE(review): dt is hard-coded to 0.0001 here instead of using DT (0.001).
__global__ void kernel_VanDerWaals(float* x, float* v, float* a, float* F, int particles)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
int r,k;
float dt = 0.0001;
for(r=0;r<ITERS;r++){
for(k = 0; k < particles; k++)
{
if(i!=k)
{
verletInt1(k, dt, x, v, a);
box_reflect(k, x, v, a);
compute_forces_naive(i, k, x, F);
verletInt2(k, dt, x, v, a);
}
}
// Clear this thread's accumulated force pair before the next iteration.
memset((F+2*i),0,2*sizeof(float));
// Block-wide barrier between iterations (does not synchronize across blocks).
__syncthreads();
}
}
/*
 * Host driver: sets up particle state, runs the van-der-Waals kernel on the
 * GPU, copies the results back, and reports kernel / end-to-end timings.
 *
 * Fixes applied:
 *  - "&param" had been corrupted to the mojibake "¶m" in the two
 *    init_particles* calls (U+00B6 swallowed "&p").
 *  - deprecated cudaThreadSynchronize() replaced by the drop-in
 *    cudaDeviceSynchronize().
 *  - host allocations are checked before use.
 *  - h_mol.F is zero-initialized before being copied to the device (it was
 *    previously copied uninitialized).
 */
int main(int argc, char **argv){
    int nsize = N_BODY_NUM;
    // Optional command-line override of the particle count.
    if(argc==2)
    {
        nsize = atoi(argv[1]);
    }
    struct timespec time1,time2;
    struct timespec time_stamp;
    cudaError_t err = cudaSuccess;
    // Start timing of the entire program.
    clock_gettime(CLOCK_REALTIME, &time1);
    // [0] = kernel only, [1] = kernel plus host<->device copies (msec).
    float elapsed_gpu[2];
    // Global simulation parameters.
    params param;
    param.npart = nsize;
    param.dt = DT;
    param.eps_lj = EPS;
    param.sig_lj = SIG;
    // Bytes per per-particle array: two floats (x,y interleaved) per particle.
    size_t size = 2 * (param.npart) * sizeof(float);
    // Arrays on GPU global memory.
    mols d_mol;
    // Arrays on the host memory.
    mols h_mol;
    // Allocate arrays on host memory and verify every allocation.
    h_mol.x = (float *) malloc(size);
    h_mol.v = (float *) malloc(size);
    h_mol.a = (float *) malloc(size);
    h_mol.F = (float *) malloc(size);
    if(h_mol.x == NULL || h_mol.v == NULL || h_mol.a == NULL || h_mol.F == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    // Forces start at zero; this buffer is copied verbatim to the device below.
    memset(h_mol.F, 0, size);
    // start1/stop1 bracket the whole GPU section including the memcopies.
    cudaEvent_t start1,stop1;
    err=cudaEventCreate(&start1);
    cudaErrorCheck(err);
    err = cudaDeviceSynchronize();
    cudaErrorCheck(err);
    err = cudaEventRecord(start1,0);
    cudaErrorCheck(err);
    printf("About to cudaMalloc\n");
    err = cudaMalloc((void**) &d_mol.x, size);
    cudaErrorCheck(err);
    err = cudaMalloc((void**) &d_mol.v, size);
    cudaErrorCheck(err);
    err = cudaMalloc((void**) &d_mol.a, size);
    cudaErrorCheck(err);
    err = cudaMalloc((void**) &d_mol.F, size);
    cudaErrorCheck(err);
    printf("Finished the cudaMalloc\n");
    // Initialize the host arrays.  init_particles may settle for fewer
    // particles than requested, so npart is updated from its return value.
    printf("\nInitializing the Particles ...");
    param.npart = init_particles(param.npart, h_mol.x, h_mol.v, &param);
    init_particles_va(param.npart, h_mol.v, h_mol.a, &param);
    printf("\t... done\n\n");
    // Transfer the arrays to the GPU memory.
    printf("About to cudaMemcpy\n");
    err = cudaMemcpy(d_mol.x, h_mol.x, size , cudaMemcpyHostToDevice);
    cudaErrorCheck(err);
    err = cudaMemcpy(d_mol.v, h_mol.v, size , cudaMemcpyHostToDevice);
    cudaErrorCheck(err);
    err = cudaMemcpy(d_mol.a, h_mol.a, size , cudaMemcpyHostToDevice);
    cudaErrorCheck(err);
    err = cudaMemcpy(d_mol.F, h_mol.F, size , cudaMemcpyHostToDevice);
    cudaErrorCheck(err);
    printf("Finished cudaMemcpy\n");
    // start/stop bracket the kernel execution only.
    cudaEvent_t start,stop;
    err=cudaEventCreate(&start);
    cudaErrorCheck(err);
    err = cudaDeviceSynchronize();
    cudaErrorCheck(err);
    err = cudaEventRecord(start,0);
    cudaErrorCheck(err);
    // Ceiling division: enough 256-thread blocks to cover every particle.
    int blocksPerGrid = ((param.npart+255)/256);
    printf("\n%d\n",blocksPerGrid);
    dim3 dimGrid(blocksPerGrid);
    dim3 dimBlock(256);
    // Launch the kernel.
    printf("Making call to kernel\n");
    kernel_VanDerWaals<<< dimGrid,dimBlock >>>(d_mol.x, d_mol.v, d_mol.a, d_mol.F, param.npart);
    // Wait for the kernel, then surface any launch/execution error.
    printf("Waiting for computation to complete...\n");
    err = cudaDeviceSynchronize();
    cudaErrorCheck(err);
    err = cudaGetLastError();
    cudaErrorCheck(err);
    // Transfer the results back to the host.
    err = cudaMemcpy( h_mol.x , d_mol.x , size ,cudaMemcpyDeviceToHost);
    cudaErrorCheck(err);
    printf("Memcpy #1 complete ...\n");
    err = cudaMemcpy( h_mol.v , d_mol.v , size ,cudaMemcpyDeviceToHost);
    cudaErrorCheck(err);
    printf("Memcpy #2 complete ...\n");
    err = cudaMemcpy( h_mol.a , d_mol.a , size ,cudaMemcpyDeviceToHost);
    cudaErrorCheck(err);
    printf("Memcpy #3 complete ...\n");
    err = cudaMemcpy( h_mol.F , d_mol.F , size ,cudaMemcpyDeviceToHost);
    cudaErrorCheck(err);
    printf("Memcpy #4 complete ...\n");
    printf("Complete!\n");
    // Stop and destroy the timers.
    err = cudaDeviceSynchronize();
    cudaErrorCheck(err);
    err = cudaEventCreate(&stop1);
    cudaErrorCheck(err);
    err = cudaEventRecord(stop1,0);
    cudaErrorCheck(err);
    err = cudaEventSynchronize(stop1);
    cudaErrorCheck(err);
    err = cudaEventCreate(&stop);
    cudaErrorCheck(err);
    err = cudaEventRecord(stop,0);
    cudaErrorCheck(err);
    err = cudaEventSynchronize(stop);
    cudaErrorCheck(err);
    // Read out the elapsed times.
    cudaEventElapsedTime(&elapsed_gpu[0],start,stop); // including kernel call only
    cudaEventElapsedTime(&elapsed_gpu[1],start1,stop1); // including memcopy
    // Clean up the timing events.
    err = cudaEventDestroy(start);
    cudaErrorCheck(err);
    err = cudaEventDestroy(stop);
    cudaErrorCheck(err);
    err = cudaEventDestroy(start1);
    cudaErrorCheck(err);
    err = cudaEventDestroy(stop1);
    cudaErrorCheck(err);
    clock_gettime(CLOCK_REALTIME, &time2);
    time_stamp = diff(time1,time2);
    printf("\nFinal times\n");
    printf("ArraySize, GPU time (msec)\n");
    //printf("GPU time: %f (msec)\t Array Size: %d\n", elapsed_gpu[i],BASE+DELTA*i);
    printf("Time to run kernel: %f\n",elapsed_gpu[0]);
    printf("Time to run kernel with memcopy: %f\n",elapsed_gpu[1]);
    printf("Time to run serial code: %lf\n",time_stamp.tv_sec + time_stamp.tv_nsec/1e9);
    // Release device and host memory.
    err = cudaFree(d_mol.x);
    cudaErrorCheck(err);
    err = cudaFree(d_mol.v);
    cudaErrorCheck(err);
    err = cudaFree(d_mol.a);
    cudaErrorCheck(err);
    err = cudaFree(d_mol.F);
    cudaErrorCheck(err);
    free(h_mol.x);
    free(h_mol.v);
    free(h_mol.a);
    free(h_mol.F);
    printf("We actually did it! \n");
    return EXIT_SUCCESS;
}
// Returns end - start as a normalized timespec (tv_nsec in [0, 1e9)).
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec result;
    long nsec = end.tv_nsec - start.tv_nsec;
    if (nsec < 0) {
        // Borrow one second so the nanosecond field stays non-negative.
        result.tv_sec = end.tv_sec - start.tv_sec - 1;
        result.tv_nsec = nsec + 1000000000;
    } else {
        result.tv_sec = end.tv_sec - start.tv_sec;
        result.tv_nsec = nsec;
    }
    return result;
}
// Aborts the process with a diagnostic if a CUDA API call reported failure.
void cudaErrorCheck(cudaError_t err)
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "cudaError (error code %s) \n",cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// Places up to n particles uniformly in the box via rejection sampling,
// keeping every pair at least sig apart.  Returns the number of particles
// actually placed (may be < n if a position could not be found within
// MAX_TRIALS attempts).  Positions go into x as (x,y) pairs.
// NOTE(review): v is accepted but never written here — velocities are filled
// by init_particles_va; confirm that is intentional.
int init_particles(int n, float* x, float* v, params* param)
{
float sig = param->sig_lj;
float min_r2 = sig*sig;
float r2,dx,dy;
int i,j,trial;
for(i = 0; i < n; i++)
{
r2 = 0;
/* Choose new point via rejection sampling */
// For i == 0 the inner j-loop never runs, so r2 stays 0 and all
// MAX_TRIALS iterations execute; the i > 0 guard below still accepts it.
for(trial = 0; (trial < MAX_TRIALS) && (r2 < min_r2); trial++)
{
x[2*i] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0;
x[2*i+1] = (float) (BOX_SIZE*drand48()) - BOX_SIZE/2.0;
// Reject the candidate if it sits too close to any earlier particle.
for(j=0; j < i; j++)
{
dx = x[2*i] - x[2*j];
dy = x[2*i+1] - x[2*j+1];
r2 = dx*dx + dy*dy;
//printf("Sample%d:%d %f %f %f %f\n",i,j,min_r2,r2,dx,dy);
if(r2 < min_r2)
break;
}
}
/* If it takes too many trials, bail and declare number set up */
if(i > 0 && r2 < min_r2)
return i;
}
return n;
}
// Assigns each particle a random velocity drawn via the Box-Muller
// transform (magnitude R scaled by T0, uniform direction T), and seeds the
// acceleration from the same draw.
// NOTE(review): a = v/dt looks like a bootstrap so the first Verlet step is
// non-zero — confirm against the integrator's expectations.
void init_particles_va(int n, float* v,float* a, params* param)
{
float R,T;
int i;
for(i=0; i < n; i++)
{
// Box-Muller: R is a Rayleigh-distributed magnitude, T a uniform angle.
R = T0 * sqrt(-2.0 * log(drand48()));
T = 2 * PI * drand48();
v[2*i] = (R * cos(T));
v[2*i+1] = (R * sin(T));
// printf("SampleVel%d %f %f\n",i,v[2*i],v[2*i+1]);
a[2*i] = (R * cos(T))/param->dt;
a[2*i+1] = (R * sin(T))/param->dt;
}
}
// Scalar coefficient of the Lennard-Jones force for squared distance r2,
// well depth eps and squared length scale sig2; multiply by the separation
// vector components to get the force.  Returns 0 beyond the cutoff.
// Fix: the literals were double (24.0, 1.0, 2.0), forcing float->double
// promotion of the whole expression in device code; they are now float.
__device__ float compute_LJ_Scalar(float r2, float eps, float sig2)
{
    // Truncate the interaction beyond the cutoff radius (CUT * sigma).
    if(r2 < (CUT2 * sig2))
    {
        float frac2 = sig2/r2;
        float frac6 = frac2*frac2*frac2;
        // d/dr of 4*eps*((sig^2/r^2)^6 - (sig^2/r^2)^3), expressed over r^2.
        return 24.0f*eps/r2 * frac6 *(1.0f-2.0f*frac6);
    }
    return 0.0f;
}
// First half of a velocity-Verlet step for particle k (2-D interleaved data).
// NOTE(review): both updates use `=` rather than `+=`, i.e. the previous
// velocity/position is discarded each step.  A conventional Verlet half-step
// is v += a*dt/2; x += v*dt — confirm whether the overwrite is intentional.
__device__ void verletInt1(int k, float dt, float* x, float* v, float* a)
{
int two_i = 2*k; // assumes that we havbe 2D data
v[two_i] = a[two_i] * (dt/2.0); // spltwo_it up for a 2D
v[two_i+1] = a[two_i+1] * (dt/2.0);
x[two_i] = v[two_i] * dt;
x[two_i+1] = v[two_i+1] * dt;
}
// Second half of the velocity-Verlet step for particle k: recompute the
// half-step velocity and fold the velocity change back into the acceleration.
// Fix: v0/v1 were declared `int`, silently truncating the cached float
// velocities to whole numbers; they must be float.
__device__ void verletInt2(int k, float dt, float* x, float* v, float* a)
{
    int two_i = 2*k;
    float v0 = v[two_i];      // was `int` — truncated the velocity
    float v1 = v[two_i+1];    // was `int` — truncated the velocity
    v[two_i] = a[two_i] * dt/2.0f; // split up for 2D
    v[two_i+1] = a[two_i+1] * dt/2.0f;
    // NOTE(review): like verletInt1, the velocity is overwritten rather than
    // accumulated — confirm intent before changing.
    a[two_i] += (v[two_i]-v0)/dt;
    a[two_i+1] += (v[two_i+1]-v1)/dt;
}
// should check for reflection inbetween
// Mirrors a 1-D coordinate about `wall` and flips the matching velocity and
// acceleration components (elastic wall bounce).
__device__ void reflect(float wall, float* x, float* v, float* a)
{
    //printf("reflected!");
    *x = 2*wall - (*x);   // mirror the overshoot back inside
    *v = -(*v);
    *a = -(*a);
}
// Bounces particle k off any of the four box walls it has crossed.
__device__ void box_reflect(int k, float* x, float* v, float* a)
{
    const int ix = 2*k;      // x-coordinate slot
    const int iy = 2*k + 1;  // y-coordinate slot
    if (x[ix] < XMIN) reflect(XMIN, &x[ix], &v[ix], &a[ix]);
    if (x[ix] > XMAX) reflect(XMAX, &x[ix], &v[ix], &a[ix]);
    if (x[iy] < YMIN) reflect(YMIN, &x[iy], &v[iy], &a[iy]);
    if (x[iy] > YMAX) reflect(YMAX, &x[iy], &v[iy], &a[iy]);
}
// now only executes over the bodies declared!
// n is me and k is them
// Accumulates into F the Lennard-Jones force exerted on particle n ("me")
// by particle k ("them").
__device__ void compute_forces_naive(int n, int k, float* x, float* F)
{
    const float eps = EPS;
    const float sig = SIG;
    const float sig2 = sig * sig;
    const float dx = x[2*k] - x[2*n];
    const float dy = x[2*k+1] - x[2*n+1];
    const float lj_scalar = compute_LJ_Scalar(dx*dx + dy*dy, eps, sig2);
    // Positive sign keeps the direction of the vector from the base molecule.
    F[2*n]   += lj_scalar * dx;
    F[2*n+1] += lj_scalar * dy;
}
|
19,103 | #include <cuda.h>
#include <stdio.h>
// Minimal CUDA driver-API smoke test: loads a PTX module, launches its
// "testing" kernel on a single thread with one device unsigned int
// (initialized to 7), and prints the value the kernel leaves behind.
// Usage: <ptx_file> <cuda_device>
//
// Fixes: the repeated check-and-bail pattern is folded into one macro that
// prints the identical message, and error paths now exit(1) instead of
// exit(0) so failures are visible to the shell; main returns a status.
#define CHECK_CU(expr, what)                              \
    do {                                                  \
        CUresult e_ = (expr);                             \
        if ((int)e_ != 0) {                               \
            printf("Error! %s returned: %d\n", what, (int)e_); \
            exit(1);                                      \
        }                                                 \
    } while (0)
int main(int argc,char *argv[]){
    if(argc<3){
        printf("Usage: ./test.cu <ptx_file> <cuda_device>\n");
        exit(0);
    }
    // The value round-tripped through the kernel.
    unsigned int h_var=7;
    // Initialize the driver API.
    CHECK_CU(cuInit(0), "cuInit");
    // Get a handle to the requested CUDA device.
    CUdevice cu_device;
    CHECK_CU(cuDeviceGet(&cu_device,atoi(argv[2])), "cuDeviceGet");
    // Create a context to run on that device.
    CUcontext cu_context;
    CHECK_CU(cuCtxCreate(&cu_context, 0, cu_device), "cuCtxCreate");
    // Load the PTX module from disk.
    CUmodule cu_module;
    CHECK_CU(cuModuleLoad(&cu_module,argv[1]), "cuModuleLoad");
    // Look up the kernel entry point by name.
    CUfunction func;
    CHECK_CU(cuModuleGetFunction(&func,cu_module,"testing"), "cuModuleGetFunction");
    // Allocate device memory for the single value.
    CUdeviceptr var;
    unsigned int size = sizeof(unsigned int);
    CHECK_CU(cuMemAlloc(&var, size), "cuMemAlloc");
    // Copy the value to the device.
    CHECK_CU(cuMemcpyHtoD(var,&h_var,size), "cuMemcpyHtoD");
    // Launch the kernel: 1x1x1 grid, 1x1x1 block, no shared mem, default stream.
    void *args[] = {&var};
    CHECK_CU(cuLaunchKernel(func, 1, 1, 1, 1, 1, 1, 0, NULL, args, NULL), "cuLaunchKernel");
    // Copy the result back to the host (synchronizes with the launch).
    CHECK_CU(cuMemcpyDtoH(&h_var,var,size), "cuMemcpyDtoH");
    // Free device memory.
    CHECK_CU(cuMemFree(var), "cuMemFree");
    // Destroy the context.
    CHECK_CU(cuCtxDestroy(cu_context), "cuCtxDestroy");
    // Print the result.
    printf("var: %d\n",h_var);
    return 0;
}
#undef CHECK_CU
|
19,104 | #include "includes.h"
// Strided SAXPY: y = a*x + y, with each thread handling NUM_ITERS elements.
// NOTE(review): NUM_ITERS, COMPUTE_THREADS_PER_CTA and CTA_COUNT come from
// includes.h; the launch must use exactly CTA_COUNT blocks of
// COMPUTE_THREADS_PER_CTA threads, and x/y must each hold
// NUM_ITERS * COMPUTE_THREADS_PER_CTA * CTA_COUNT floats — there is no
// bounds check.  timer_vals is accepted but unused here.
__global__ void saxpy_baseline ( float* y, float* x, float a, clock_t * timer_vals)
{
for (int i=0; i < NUM_ITERS; i++) {
// Stride by the whole grid each pass so consecutive threads stay coalesced.
unsigned int idx = i * COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + threadIdx.x;
y[idx] = a * x[idx] + y[idx];
}
}
19,105 | #include "DES-Cracker.cuh"
static __device__ __constant__ int E[48] = {
32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1
};
static __device__ __constant__ int P[32] = {
16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25
};
static __device__ __constant__ unsigned long S[8][4][16] = {
{
{ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 },
{ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 },
{ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 },
{ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }
},
{
{ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 },
{ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 },
{ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 },
{ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }
},
{
{ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 },
{ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 },
{ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 },
{ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }
},
{
{ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 },
{ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 },
{ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 },
{ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }
},
{
{ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 },
{ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 },
{ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 },
{ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }
},
{
{ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 },
{ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 },
{ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 },
{ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }
},
{
{ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 },
{ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 },
{ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 },
{ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }
},
{
{ 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 },
{ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 },
{ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 },
{ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 }
}
};
static __device__ __constant__ int IP_tab[64] = {
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7
};
static __device__ __constant__ int IPminus[64] = {
40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25
};
static __device__ __constant__ int PC1[56] = {
57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4
};
static __device__ __constant__ int PC2[48] = {
14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32
};
static __device__ __constant__ int shifts[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 };
// Rotates a 28-bit DES half-key left by `pos` bits.  The 28-bit value is
// stored left-aligned in the upper bits of the 64-bit word (bit 1 = MSB,
// matching set_bit/get_bit's 1-based-from-the-left convention).
// NOTE(review): no 28-bit mask is applied, so stray bits remain below the
// field after the wrap shift; downstream reads only consult positions 1..28,
// but verify the residue cannot creep back into the field across the 16
// chained rotations of the key schedule.
__device__ uint64_t rotate_left_28(uint64_t bits, uint32_t pos)
{
return bits >> (28 - pos) | bits << pos;
}
// Rotates a 28-bit DES half-key right by `pos` bits (inverse of
// rotate_left_28; same left-aligned storage convention and the same
// unmasked-residue caveat).
__device__ uint64_t rotate_right_28(uint64_t bits, uint32_t pos)
{
return bits << (28 - pos) | bits >> pos;
}
//first bit on the left is on position 1
// Returns the bit of `bits` at 1-based position `pos`, counting from the
// most-significant (leftmost) bit of the 64-bit word.
__device__ uint32_t get_bit(uint64_t bits, uint32_t pos)
{
    const uint32_t shift = 64 - pos;
    return (uint32_t)((bits >> shift) & 0x01);
}
//first bit on the left is on position 1
// Writes `value` (0/1) into `bits` at 1-based position `pos`, counting from
// the leftmost bit; mutates the argument in place and also returns it.
__device__ uint64_t set_bit(uint64_t& bits, uint32_t pos, uint32_t value)
{
    const uint64_t mask = 1LL << (64 - pos);
    bits = value ? (bits | mask) : (bits & ~mask);
    return bits;
}
// DES round function f(R, K): expand the 32-bit half-block with E, XOR with
// the 48-bit round key, substitute through the eight S-boxes, then permute
// with P.  All values use the left-aligned, 1-based bit convention of
// set_bit/get_bit.
__device__ uint64_t f(uint64_t data, uint64_t key)
{
int i, j;
// Expansion E: 32 -> 48 bits.
uint64_t E_data = 0LL;
for (i = 1; i <= 48; ++i)
{
set_bit(E_data, i, get_bit(data, E[i - 1]));
}
//print_uint64_t(E_data, 48, "E(R)");
// Key mixing.
uint64_t result = E_data^key;
//print_uint64_t(result, 48, "E(R)+K");
// Split into eight 6-bit groups B_1..B_8.
uint64_t B[8];
for (i = 1; i <= 8; ++i)
{
B[i - 1] = 0LL;
for (j = 1; j <= 6; ++j)
{
set_bit(B[i - 1], j, get_bit(result, (i - 1) * 6 + j));
}
//print_uint64_t(B[i - 1], 6, "B_" + std::to_string(i));
}
// S-box lookups: bits 1,6 select the row; bits 2..5 select the column.
// NOTE(review): SB is sized 80 but only the first 8 entries are used.
uint64_t SB[80];
uint64_t temp = 0LL;
uint32_t r, c;
for (i = 1; i <= 8; ++i)
{
SB[i - 1] = 0LL;
r = 2 * get_bit(B[i - 1], 1) + get_bit(B[i - 1], 6);
c = 8 * get_bit(B[i - 1], 2) + 4 * get_bit(B[i - 1], 3) + 2 * get_bit(B[i - 1], 4) + get_bit(B[i - 1], 5);
temp = S[i - 1][r][c];
// Copy the 4-bit S-box output (low nibble of temp, i.e. positions 61..64).
for (j = 1; j <= 4; ++j)
{
set_bit(SB[i - 1], j, get_bit(temp, 60 + j));
}
}
/*for (i = 1; i <= 8; ++i)
{
print_uint64_t(SB[i - 1], 4, "SB_" + std::to_string(i));
}*/
// Concatenate the eight 4-bit outputs into one 32-bit value, then
// left-align it in the 64-bit word.
uint64_t SBconcat = 0LL;
for (i = 1; i <= 8; ++i)
{
SBconcat = SBconcat << 4;
SBconcat = SBconcat | (SB[i - 1] >> 60);
}
SBconcat = SBconcat << 32;
//print_uint64_t(SBconcat, 32, "SBconcat");
// Final permutation P of the round function.
uint64_t f_res = 0LL;
for (i = 1; i <= 32; ++i)
{
set_bit(f_res, i, get_bit(SBconcat, P[i - 1]));
}
//print_uint64_t(f_res, 32, "f_res");
return f_res;
}
// Runs the 16 DES Feistel rounds (including the full key schedule derived
// from K0) on block M, WITHOUT applying the initial/final permutations —
// callers that need full DES wrap this with IP / IP^-1 (see encrypt()).
// Returns the pre-output block R16L16.
__device__ uint64_t encrypt_no_permutations(uint64_t M, uint64_t K0)
{
//STEP 1: Create 16 subkeys, each of which is 48-bits long
int i, b;
// PC-1: drop parity bits, 64 -> 56 bits.
uint64_t Kplus = 0LL;
for (i = 1; i <= 56; ++i)
{
set_bit(Kplus, i, get_bit(K0, PC1[i - 1]));
}
// Split into the two 28-bit halves C0 and D0.
uint64_t C[17], D[17];
C[0] = 0LL;
D[0] = 0LL;
for (i = 1; i <= 28; ++i)
{
set_bit(C[0], i, get_bit(Kplus, i));
set_bit(D[0], i, get_bit(Kplus, 28 + i));
}
// Rotate each half left per the round-dependent shift schedule.
for (i = 1; i <= 16; ++i)
{
C[i] = 0LL;
D[i] = 0LL;
C[i] = rotate_left_28(C[i - 1], shifts[i - 1]);
D[i] = rotate_left_28(D[i - 1], shifts[i - 1]);
}
// PC-2: compress each C_i D_i pair to the 48-bit round key K_i.
// Positions > 28 select from D (offset by 28), the rest from C.
uint64_t K[16];
for (i = 1; i <= 16; ++i)
{
K[i - 1] = 0LL;
for (b = 1; b <= 48; ++b)
{
set_bit(K[i - 1], b, PC2[b - 1] > 28 ? get_bit(D[i], PC2[b - 1] - 28) : get_bit(C[i], PC2[b - 1]));
}
}
//STEP 2: Encode each 64-bit block of data
// Split M into 32-bit halves L0 and R0.
uint64_t L[17], R[17];
L[0] = 0LL;
R[0] = 0LL;
for (i = 1; i <= 32; ++i)
{
set_bit(L[0], i, get_bit(M, i));
set_bit(R[0], i, get_bit(M, 32 + i));
}
// 16 Feistel rounds: L_i = R_{i-1};  R_i = L_{i-1} XOR f(R_{i-1}, K_i).
for (i = 1; i <= 16; ++i)
{
L[i] = 0LL;
R[i] = 0LL;
L[i] = R[i - 1];
R[i] = L[i - 1] ^ f(R[i - 1], K[i - 1]);
}
// Swap the final halves: pre-output = R16 || L16.
uint64_t R16L16 = 0LL;
for (i = 1; i <= 64; ++i)
{
set_bit(R16L16, i, i <= 32 ? get_bit(R[16], i) : get_bit(L[16], i - 32));
}
return R16L16;
}
// Full DES encryption of one 64-bit block: initial permutation IP, the 16
// Feistel rounds (encrypt_no_permutations), then the final permutation IP^-1.
__device__ uint64_t encrypt(uint64_t M, uint64_t K0)
{
    // Initial permutation IP.
    uint64_t permuted = 0LL;
    for (int pos = 1; pos <= 64; ++pos)
    {
        set_bit(permuted, pos, get_bit(M, IP_tab[pos - 1]));
    }
    // 16 rounds with the key schedule derived from K0.
    uint64_t preoutput = encrypt_no_permutations(permuted, K0);
    // Final permutation IP^-1.
    uint64_t ciphertext = 0LL;
    for (int pos = 1; pos <= 64; ++pos)
    {
        set_bit(ciphertext, pos, get_bit(preoutput, IPminus[pos - 1]));
    }
    return ciphertext;
}
// Brute-force probe: thread `tid` tests candidate key base_key + tid by
// encrypting `plaintext` with full DES and comparing against `ciphertext`.
// On a match it records the key and raises the found flag.
__global__ void check_keys(uint64_t plaintext, uint64_t base_key, uint64_t ciphertext, uint64_t *result, bool *is_found_key)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const uint64_t candidate = base_key + tid;
    if (encrypt(plaintext, candidate) == ciphertext)
    {
        *is_found_key = true;
        *result = candidate;
    }
}
// Same brute-force probe as check_keys, but skips the IP / IP^-1
// permutations — the caller must supply an already-IP-permuted plaintext
// and a ciphertext in pre-output form.
__global__ void check_keys_no_permutations(uint64_t plaintext_no_permutations, uint64_t base_key, uint64_t ciphertext, uint64_t *result, bool *is_found_key)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const uint64_t candidate = base_key + tid;
    if (encrypt_no_permutations(plaintext_no_permutations, candidate) == ciphertext)
    {
        *is_found_key = true;
        *result = candidate;
    }
}
19,106 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec/1000000.0;
}
// One pass of a pairwise tree reduction over d_vecA: thread g adds the
// element dist/2 positions ahead into slot g*dist, halving the number of
// active partial sums.  On the final pass (dist == tam_tot, the full vector
// length) the surviving sum is divided by tam_tot, yielding the average.
// NOTE(review): the scheme covers every element only when the vector length
// is a power of two — confirm the caller guarantees that.
__global__ void vecMult(double *d_vecA,unsigned long dist,unsigned long n,unsigned long tam_tot){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n){
d_vecA[global_id*dist] = d_vecA[global_id*dist] + d_vecA[global_id*dist+dist / 2];
if(dist == tam_tot) {
// Last step: convert the total into the mean.
d_vecA[global_id*dist] /= tam_tot;
}
}
}
// Averages an N-element vector (vecA[i] = i) on the GPU with log2(N) passes
// of the vecMult tree reduction, then prints the elapsed time and result.
// N should be a power of two for the reduction to cover every element.
// Fix: cudaThreadSynchronize() is deprecated — replaced with the drop-in
// cudaDeviceSynchronize().
int main(int argc, char *argv[]){
    if (argc != 2){
        printf("Falta argumento: N\n");
        return 0;
    }
    // Variable declarations.
    cudaError_t error;
    unsigned int N = atoi (argv[1]);
    unsigned long CUDA_BLK = 128;
    unsigned long numBytes = sizeof(double)*N,tam_tot;
    double *vecA,result,*d_vecA,timetick;
    unsigned int i;
    vecA = (double *)malloc(numBytes);
    result = 0;
    // vecA = [0, 1, ..., N-1]; the expected average is (N-1)/2.
    for (i = 0; i < N; i++){
        vecA[i] = i;
    }
    tam_tot = N;
    cudaMalloc((void **) &d_vecA, numBytes);
    // One-dimensional thread block of CUDA_BLK threads.
    dim3 dimBlock(CUDA_BLK);
    // Timed region: host->device copy plus all reduction passes.
    timetick = dwalltime();
    cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    // Pass with stride i combines N/i partial sums; the final pass (i == N)
    // also divides by N inside the kernel to produce the average.
    for(i = 2; i <= N ;i *= 2){
        dim3 dimGrid((N / i + dimBlock.x - 1) / dimBlock.x);
        vecMult<<<dimGrid, dimBlock>>>(d_vecA,i,N/i,tam_tot);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(&result, d_vecA, sizeof(double), cudaMemcpyDeviceToHost); // GPU -> CPU
    printf("Tiempo para la GPU: %f\n",dwalltime() - timetick);
    // Surface any error left behind by the launches/copies.
    error = cudaGetLastError();
    printf("error: %d\n\n",error);
    printf("resultadoGPU: %f\n",result);
    cudaFree(d_vecA);
    free(vecA);
    return 0;
}
19,107 | #include<cstdio>
// extern "C" keeps the kernel's symbol name unmangled so it can be located
// by name (e.g. when loading the module through the driver API).
extern "C" {
// Prints a greeting from every thread in the 1-D launch.
__global__
void HelloWorld(){
// Flat global thread id across the grid.
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
printf("Hello World! thread #%d\n", thid);
}
}
|
19,108 | #include <random>
#include <assert.h>
#include <chrono>
#include <iostream>
using real = float;
#define DEBUG
const real tau_v = 20.;
const real tau_exc = 5.;
const real tau_inh = 10.;
const real v_thresh = -50.;
const real v_reset = -60.;
const real v_rest = -49.;
const real wgt_exc = 60.*.27/5;
const real wgt_inh = -20*4.5/10;
const real ts = .1; // ms
// refractory period is 5ms
// gotta manually divide 5 / ts here because of FP error...
const unsigned char refractory_cycles = 50;
// Simulation parameters
const size_t N = 100000;
const size_t delay = 8;
const size_t max_conns_per_neuron = 1000;
const real seconds = 10.;
const size_t num_iterations = seconds / (ts * 1e-3) / delay;
const double resulting_sparsity = 1. * max_conns_per_neuron / N;
const size_t N_exc = N * 4 / 5;
// gpu optimization params
const size_t threads_per_block = 1000;
const size_t n_blocks = N / threads_per_block;
// Synaptic input accumulated for one neuron in one delay slot: excitatory
// and inhibitory conductance increments, zeroed after being consumed.
struct Injection {
real exc, inh;
Injection(real a, real b): exc(a), inh(b) {}
Injection() : exc(0.), inh(0.) {}
};
// One outgoing synapse: index of the target neuron and the weight delivered
// when the source neuron spikes (positive = excitatory, negative = inhibitory).
struct Connection {
unsigned int idx;
real wgt;
Connection(unsigned int d, real w) : idx(d), wgt(w) {}
Connection() : idx(0), wgt(0.) {}
};
const size_t bank_size = N * delay;
#define injection(polarity, delay_idx, neuron_idx) (bank_injections[polarity * bank_size + delay_idx * N + neuron_idx])
#define connection(neuron_idx, synapse_idx) (connections[neuron_idx * max_conns_per_neuron + synapse_idx])
unsigned char __ping_var = 0;
#include <stdio.h>
#define ping() fprintf(stderr, "ping %d\n", __ping_var++); fflush(stderr);
// Advances every neuron by `delay` timesteps of the LIF dynamics; one thread
// per neuron (launch must use n_blocks x threads_per_block covering N).
// Injections are double-buffered by `polarity`: this call reads/clears bank
// `polarity` and writes spikes' effects into bank `!polarity`, avoiding
// cross-block races between consecutive kernel launches.
// NOTE(review): spikes[] is rewritten at the top of each delay_idx iteration
// while other threads may still be reading it in the previous iteration's
// lidx loop — a trailing __syncthreads() at the loop end appears to be
// missing; confirm with racecheck.
__global__
void iterate(real * v, real * ge, real * gi, unsigned char * refrac, Connection * connections, Injection * bank_injections, bool polarity
#ifdef DEBUG
, int *nspikes
#endif
) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
// Per-block spike flags for this timestep, shared so every thread can fan
// out the spikes of its whole block.
__shared__ bool spikes[threads_per_block];
for (unsigned char delay_idx = 0; delay_idx < delay; ++delay_idx) {
// Leaky integrate-and-fire derivatives (Euler step of size ts).
real dv = (ge[idx] + gi[idx] - (v[idx] - v_rest)) / tau_v;
real dge = -ge[idx] / tau_exc;
real dgi = -gi[idx] / tau_inh;
// Membrane is clamped while the refractory counter runs down.
if (refrac[idx]) {
--refrac[idx];
dv = 0.;
}
// read once to local register
real v_ = v[idx] + dv * ts;
// Consume (and clear) this neuron's queued injections for this slot.
Injection *inj = &injection(polarity, delay_idx, idx);
ge[idx] += inj->exc + dge * ts;
gi[idx] += inj->inh + dgi * ts;
inj->exc = 0.;
inj->inh = 0.;
// Threshold crossing: reset and enter the refractory period.
bool spiked = v_ > v_thresh;
spikes[threadIdx.x] = spiked;
if (spiked) {
v_ = v_reset;
refrac[idx] = refractory_cycles;
}
v[idx] = v_;
__syncthreads();
// Fan-out: thread t delivers synapse t of every spiking neuron in the block.
for (unsigned int lidx = 0; lidx < blockDim.x; ++lidx) {
// this actually isn't that bad because either all threads take it or all don't
if (spikes[lidx]) {
#ifdef DEBUG
// Count each spike once (only lane 0 of the block increments).
if (threadIdx.x == 0)
atomicAdd(nspikes, 1);
#endif
size_t tidx = blockDim.x * blockIdx.x + lidx;
Connection c = connection(tidx, threadIdx.x);
real wgt = c.wgt;
// We read from polarity, and write to ~polarity to avoid race conditions across blocks
if (c.wgt > 0)
atomicAdd(&injection(!polarity, delay_idx, c.idx).exc, wgt);
else
atomicAdd(&injection(!polarity, delay_idx, c.idx).inh, wgt);
}
}
}
}
// Builds a random LIF network (N neurons, ~max_conns_per_neuron synapses
// each), copies it to the GPU, and runs num_iterations kernel launches of
// `delay` timesteps each, flipping the injection double-buffer between
// launches.  In DEBUG builds, reports the mean firing rate at the end.
int main() {
std::default_random_engine gen;
// Initial membrane voltages uniform in [v_reset, v_thresh).
std::uniform_real_distribution<> voltage_dist(v_reset, v_thresh);
// Gaps between successive synapse targets; mean N/max_conns gives the
// desired sparsity.
std::poisson_distribution<> connection_dist(N / max_conns_per_neuron);
// std::uniform_real_distribution<> unit_dist(0., 1.);
real * neuron_v;
real * cuda_neuron_v;
real * neuron_ge;
real * cuda_neuron_ge;
real * neuron_gi;
real * cuda_neuron_gi;
unsigned char * neuron_ref_cycles_rem;
unsigned char * cuda_neuron_ref_cycles_rem;
Connection * connections;
Connection * cuda_connections;
Injection * bank_injections;
Injection * cuda_bank_injections;
// allocate (host mirror + device buffer for each state array)
neuron_v = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_v, sizeof(real) * N));
neuron_ge = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_ge, sizeof(real) * N));
neuron_gi = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_gi, sizeof(real) * N));
neuron_ref_cycles_rem = new unsigned char[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_ref_cycles_rem, sizeof(unsigned char) * N));
connections = new Connection[max_conns_per_neuron * N];
assert(cudaSuccess == cudaMalloc(&cuda_connections, sizeof(Connection) * max_conns_per_neuron * N));
// Two banks (read/write) of N x delay injection slots.
bank_injections = new Injection[2 * N * delay];
assert(cudaSuccess == cudaMalloc(&cuda_bank_injections, sizeof(Injection) * 2 * N * delay));
ping();
// initialize: random voltages, zero conductances, sparse random synapses
// whose targets advance by Poisson-distributed gaps.
for (size_t i = 0; i < N; ++i) {
neuron_v[i] = voltage_dist(gen);
neuron_ge[i] = neuron_gi[i] = 0.;
neuron_ref_cycles_rem[i] = 0;
size_t synapse_idx = connection_dist(gen) - 1;
for (unsigned conn_idx = 0; conn_idx < 1000 && synapse_idx < N; ++conn_idx) {
// First 4/5 of neurons are excitatory, the rest inhibitory.
real wgt = (i < N_exc) ? wgt_exc : wgt_inh;
connection(i, conn_idx) = Connection(synapse_idx, wgt);
synapse_idx += connection_dist(gen);
}
}
ping();
// copy to GPU
assert(cudaSuccess == cudaMemcpy(cuda_neuron_v, neuron_v, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_ge, neuron_ge, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_gi, neuron_gi, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_ref_cycles_rem, neuron_ref_cycles_rem, sizeof(unsigned char) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_connections, connections, sizeof(Connection) * N * max_conns_per_neuron, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_bank_injections, bank_injections, sizeof(Injection) * 2 * N * delay, cudaMemcpyHostToDevice));
ping();
#ifdef DEBUG
int nspikes = 0;
int * cuda_nspikes;
assert(cudaSuccess == cudaMalloc(&cuda_nspikes, sizeof(int)));
assert(cudaSuccess == cudaMemcpy(cuda_nspikes, &nspikes, sizeof(int), cudaMemcpyHostToDevice));
#endif
// run: each launch advances `delay` steps; polarity flips the injection
// double-buffer between launches.
bool polarity = false;
std::cout << "begin!" << std::endl;
auto t1 = std::chrono::high_resolution_clock::now();
for (size_t it = 0; it < num_iterations; ++it) {
iterate<<<n_blocks, threads_per_block>>>(cuda_neuron_v, cuda_neuron_ge, cuda_neuron_gi, cuda_neuron_ref_cycles_rem, cuda_connections, cuda_bank_injections, polarity
#ifdef DEBUG
, cuda_nspikes
#endif
);
polarity = !polarity;
}
// NOTE(review): launches are asynchronous and there is no
// cudaDeviceSynchronize() before t2, so this elapsed time measures launch
// submission, not kernel completion (the DEBUG memcpy below does block).
auto t2 = std::chrono::high_resolution_clock::now();
auto diff = (t2 - t1);
std::cout << "Time Elapsed: " << (diff.count() / 1e9) << std::endl;
#ifdef DEBUG
cudaMemcpy(&nspikes, cuda_nspikes, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "Firing Rate: " << (1. * nspikes / N / seconds) << "Hz" << std::endl;
#endif
}
|
19,109 | /*
Soma dois vetores
Ilustra a alocação dinâmica da memoria compartilhada
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define TAM 16
#define TPB 2
// Element-wise sum vetC = vetA + vetB, staging each result through
// dynamically allocated shared memory (illustrative use of extern __shared__;
// the size is supplied as the third launch parameter).
// Fix: the shared buffer is per-block, so it must be indexed with the
// thread's block-local id (threadIdx.x), not the global id — the original
// only stayed in bounds because the demo allocated TAM ints per block.
__global__ void soma(int *vetA_glb, int *vetB_glb,int *vetC_glb){
    // Dynamically allocated shared staging buffer.
    extern __shared__ int vetC_shd[];
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < TAM)
    {
        vetC_shd[threadIdx.x] = vetA_glb[idx] + vetB_glb[idx];
        vetC_glb[idx] = vetC_shd[threadIdx.x];
    }
}
// Host driver: fills vetA = [0..TAM) and vetB = -vetA, adds them on the GPU
// with the `soma` kernel (expected output: all zeros), and prints the result.
int main(int argc,char **argv){
int *vetA_h,*vetB_h,*vetC_h;
int *vetA_d,*vetB_d,*vetC_d;
int i, blocksPerGrid;
// Allocate the vectors on the host.
vetA_h=(int *)malloc(TAM * sizeof(int));
vetB_h=(int *)malloc(TAM * sizeof(int));
vetC_h=(int *)malloc(TAM * sizeof(int));
// Allocate the vectors on the device.
cudaMalloc((void**)&vetA_d,TAM*(sizeof(int)));
cudaMalloc((void**)&vetB_d,TAM*(sizeof(int)));
cudaMalloc((void**)&vetC_d,TAM*(sizeof(int)));
// Fill the vectors on the host.
for(i=0;i<TAM;i++){
vetA_h[i]=i;
vetB_h[i]=-i;
}
// Copy the vectors' contents to the device.
cudaMemcpy(vetA_d,vetA_h,TAM*(sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(vetB_d,vetB_h,TAM*(sizeof(int)), cudaMemcpyHostToDevice);
// Number of blocks per grid (ceiling division over TPB threads per block).
blocksPerGrid=(TAM+TPB-1)/TPB;
// (Disabled) kernel timing with CUDA events.
// cudaEvent_t start, stop;
// float gpu_time = 0.0f;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start, 0);
// Launch the kernel with blocksPerGrid blocks of TPB threads, requesting
// TAM*sizeof(int) bytes of dynamic shared memory per block.
soma <<<blocksPerGrid,TPB,TAM*sizeof(int)>>> (vetA_d,vetB_d,vetC_d);
// (Disabled) kernel timing with CUDA events.
// cudaThreadSynchronize();
// cudaEventRecord(stop, 0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&gpu_time, start, stop);
// printf("%.5f\n", gpu_time);
// cudaEventDestroy(start);
// cudaEventDestroy(stop);
// Copy the sum back to the host (this blocking copy also syncs the kernel).
cudaMemcpy(vetC_h,vetC_d,TAM*(sizeof(int)), cudaMemcpyDeviceToHost);
// Print the result on the host.
for(i=0;i<TAM;i++){
printf("%d ",vetC_h[i]);
}
// Free the host vectors.
free(vetA_h);
free(vetB_h);
free(vetC_h);
// Free the device vectors.
cudaFree(vetA_d);
cudaFree(vetB_d);
cudaFree(vetC_d);
}
19,110 | #include <iostream>
#include <math.h>
// Computes one neuron's weighted sum (dot(inputs, weights) + bias) entirely
// on the host, with the buffers held in CUDA managed memory, and prints it.
// Fix: each cudaMallocManaged result is now checked before the pointer is
// dereferenced — an allocation failure previously led straight to an
// invalid write.
int main() {
    float *inputs, *weights, *bias, *output;
    if (cudaMallocManaged(&inputs, 3*sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&weights, 3*sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&bias, sizeof(float)) != cudaSuccess ||
        cudaMallocManaged(&output, sizeof(float)) != cudaSuccess) {
        std::cerr << "cudaMallocManaged failed" << std::endl;
        return 1;
    }
    inputs[0] = 1.0f;
    inputs[1] = 2.0f;
    inputs[2] = 3.0f;
    weights[0] = 3.1f;
    weights[1] = 2.1f;
    weights[2] = 8.7f;
    bias[0] = 3.0f;
    // Plain host-side dot product; no kernel is launched.
    output[0] = inputs[0] * weights[0] + inputs[1] * weights[1] +
                inputs[2] * weights[2] + bias[0];
    std::cout << output[0] << std::endl;
    cudaFree(inputs);
    cudaFree(weights);
    cudaFree(bias);
    cudaFree(output);
}
|
19,111 | #include "includes.h"
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {} |
19,112 | #include <math_constants.h>
#define RADIUS_IN_KM 6372.8
extern "C"
// Computes the haversine distance betwwen two points on Earth
// Computes the haversine great-circle distance (km) between point pairs.
// `in` is packed per pair as [lat1, lon1, lat2, lon2] in degrees; out[ix] is km.
// One thread per pair; launch with at least *size threads in total.
__global__ void haversine(int *size, double *in, double *out) {
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    if (ix < *size) {
        const int lat1ix = 4 * ix, lon1ix = 4 * ix + 1, lat2ix = 4 * ix + 2, lon2ix = 4 * ix + 3;
        // Fix: use the double-precision constant CUDART_PI. The original used
        // CUDART_PI_F (float pi), silently truncating every degree->radian
        // conversion to single precision inside an otherwise double kernel.
        const double deg2rad = CUDART_PI / 180.0;
        const double dLat = (in[lat2ix] - in[lat1ix]) * deg2rad;
        const double dLon = (in[lon2ix] - in[lon1ix]) * deg2rad;
        const double a = pow(sin(dLat / 2.0), 2.0) +
                         pow(sin(dLon / 2.0), 2.0) * cos(in[lat1ix] * deg2rad) * cos(in[lat2ix] * deg2rad);
        const double c = 2.0 * asin(sqrt(a));
        out[ix] = RADIUS_IN_KM * c;
    }
}
|
19,113 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
using namespace std;
// Returns the current wall-clock time in seconds (microsecond resolution).
// Returns 0 if gettimeofday fails.
double get_wall_time(){
    struct timeval tv;
    if (gettimeofday(&tv, NULL) != 0)
        return 0; // error path: caller sees epoch zero
    return (double)tv.tv_sec + (double)tv.tv_usec * .000001;
}
// SAXPY: y[i] = a*x[i] + y[i] for i in [0, n); one element per thread,
// guarded so the last partially-filled block cannot write out of bounds.
__global__ void saxpy(int n, float a, float *x, float *y)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    y[idx] = a * x[idx] + y[idx];
}
// Returns the smallest value in array[0..size-1]; assumes size >= 1.
double min(double* array, int size){
    double best = array[0];
    for (int i = 1; i < size; ++i)
        best = best <= array[i] ? best : array[i];
    return best;
}
// SAXPY benchmark driver.
// argv: [1] log2 of the vector length, [2] kernel launches per timed run,
//       [3] number of timed runs. Prints nruns and the fastest wall time.
int main(int argc, char * argv[])
{
unsigned long int arrlength= atoi(argv[1]);
unsigned long int N = 1<<arrlength; // vector length is 2^arrlength
unsigned long int nruns = atoi(argv[2]);
unsigned long int neval = atoi(argv[3]);
size_t size = N*sizeof(float);
double seconds[neval]; // wall time of each evaluation (variable-length array)
float *x, *y; // Host vectors
float *d_x, *d_y; // Device vectors
// Allocate host memory
x = (float *)malloc(size);
y = (float *)malloc(size);
// Allocate device memory
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
for (unsigned long int i = 0; i < N; i++){
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
// Warm-up launch + copies, excluded from the timed loop below.
saxpy<<<(N+255)/256, 256>>>(N, 2.0, d_x, d_y);
cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
for(unsigned long int run = 0; run < neval; run++)
{
// Allocate device memory
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
double wall_timestart = get_wall_time();
cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
for(unsigned long int count = 0; count < nruns; count ++){
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0, d_x, d_y);
}
// This blocking device-to-host copy also drains the queued kernel
// launches, so the timed interval covers all nruns kernels.
cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
double wall_timestop = get_wall_time();
cudaFree(d_x);
cudaFree(d_y);
seconds[run] = wall_timestop - wall_timestart;
}
free(x);
free(y);
cout<<nruns<<"\t\t"<<min(seconds,neval)<<endl;
}
|
19,114 | #include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
using namespace std;
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
// Naive GEMM: C = A * B for n x n row-major matrices, one output element
// per thread over a 2D grid of 2D blocks.
// NOTE(review): the row offset uses threads_per_block for both axes, so this
// assumes blockDim.x == blockDim.y == threads_per_block — confirm at launch.
__global__ void dgemm(float *A, float *B, float *C, int threads_per_block, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0.0f;
int ia = n * (threads_per_block * by + ty); // Start offset of this thread's row of A
int ib = threads_per_block * bx + tx; // Column index into B
int ic = ia + ib; // Linear index of the C element
for (int k = 0; k < n; k++)
sum += A[ia + k] * B[ib + k * n];
C[ic] = sum;
}
// Fills A and B with pseudo-random values from rand() and zeroes C.
// All three are size x size row-major matrices.
void InitMatrix(float *A, float *B, float *C, int size)
{
    for (int row = 0; row < size; ++row) {
        for (int col = 0; col < size; ++col) {
            const int k = size * row + col;
            A[k] = rand();
            B[k] = rand();
            C[k] = 0.0;
        }
    }
}
// Prints all size*size elements of C, tab-separated, followed by a newline.
void printMatrix(float *C, int size)
{
    const int total = size * size;
    for (int k = 0; k < total; ++k)
        cout << C[k] << "\t";
    cout << endl;
}
// Wall-clock timestamp in seconds with microsecond resolution.
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
// Driver: parses [matrix size] [threads_x] [threads_y], builds random square
// matrices, times one GEMM kernel launch with CUDA events, and prints C.
// NOTE(review): the grid uses size/threads integer division, so `size` must be
// divisible by both block dimensions or trailing rows/columns are skipped.
int main(int argc, char* argv[])
{
if (argc != 4) {
cout << "launch parametrs: [matrix size] [threads_x] [threads_y]" << endl;
return 1;
}
int size = atoi(argv[1]);
int threads_per_block_x = atoi(argv[2]);
int threads_per_block_y = atoi(argv[3]);
srand(time(NULL));
float *A = new float[size * size];
float *B = new float[size * size];
float *C = new float[size * size];
float *dev_A, *dev_B, *dev_C;
cudaMalloc((void**)&dev_A, size * size * sizeof(float));
cudaMalloc((void**)&dev_B, size * size * sizeof(float));
cudaMalloc((void**)&dev_C, size * size * sizeof(float));
InitMatrix(A, B, C, size);
dim3 threads(threads_per_block_x, threads_per_block_y);
dim3 blocks(size / threads.x, size / threads.y);
cudaMemcpy(dev_A, A, size * size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, B, size * size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_C, C, size * size * sizeof(float), cudaMemcpyHostToDevice);
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dgemm <<< blocks, threads >>> (dev_A, dev_B, dev_C, threads_per_block_x, size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError());
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(C, dev_C, size * size * sizeof(float), cudaMemcpyDeviceToHost);
cout << "time: " << elapsedTime << " ms" << endl;
printMatrix(C, size);
delete [] A; delete [] B; delete [] C;
cudaEventDestroy(start); cudaEventDestroy(stop);
cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C);
return 0;
}
|
19,115 | #include "includes.h"
// One gradient-descent step per element: parameters in odata are moved
// against the gradient supplied in idata.
// NOTE(review): LEARNIG_RATE (sic) must be a macro/constant from includes.h —
// confirm its definition there.
__global__ void grad_descent(float *odata, const float *idata, int size) {
int t = blockIdx.x * blockDim.x + threadIdx.x;
if (t < size) {
odata[t] -= LEARNIG_RATE * idata[t];
}
} |
19,116 | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*MatVecMul_Kernel*/
/* MatVecMul_Kernel: dense mat-vec C = A * B, A is n x n row-major, B and C
   length n. One thread computes one output row. Valid only for single-block
   launches, since the index uses threadIdx.x alone. */
__global__
void MatVecMul_Kernel(float* A, float* B, float* C, int n) {
    const int row = threadIdx.x;
    if (row >= n)
        return;
    float acc = 0;
    for (int col = 0; col < n; ++col)
        acc += A[row * n + col] * B[col];
    C[row] = acc;
}
/*MatVecMul_GPU*/
/* Copies inputs to the device, runs the mat-vec kernel, and copies C back.
   h_A: n*n row-major matrix, h_B: input vector (n), h_C: output vector (n).
   Fix: the launch was hard-coded to <<<1, 10>>>, so for n != 10 only the first
   10 rows of C were computed (or excess threads ran). Launch n threads instead;
   the kernel indexes by threadIdx.x only, so it must remain a single block
   (valid for n <= 1024). */
void MatVecMul_GPU(float* h_A, float* h_B, float* h_C, int n) {
    int sizeM = n*n * sizeof(float);
    int sizeV = n * sizeof(float);
    float *d_A;
    float *d_B;
    float *d_C;
    cudaMalloc(&d_A, sizeM);
    cudaMemcpy(d_A, h_A, sizeM, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B, sizeV);
    cudaMemcpy(d_B, h_B, sizeV, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C, sizeV);
    cudaMemcpy(d_C, h_C, sizeV, cudaMemcpyHostToDevice);
    MatVecMul_Kernel <<< 1, n >>> (d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, sizeV, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Demo: multiplies a 10x10 all-ones matrix by an all-ones vector on the GPU,
// times the whole host wrapper with std::chrono, and prints the result
// (expected: ten values of 10).
int main() {
//Host Matrix
float *h_A, *h_B, *h_C;
int n = 10;
h_A = (float*)malloc(n*n * sizeof(float));
h_B = (float*)malloc(n * sizeof(float));
h_C = (float*)malloc(n * sizeof(float));
//Create Matrix
for (int i = 0; i < n*n; i++) {
h_A[i] = 1.0;
}
//Create Vector
for (int i = 0; i < n; i++) {
h_B[i] = 1.0;
h_C[i] = 1.0;
}
//MatVecMul (Main)
// Timing brackets the entire wrapper: allocations, copies, kernel, teardown.
chrono::time_point<chrono::system_clock> MatVecMul_GPU_Start, MatVecMul_GPU_End;
MatVecMul_GPU_Start = chrono::system_clock::now();
MatVecMul_GPU(h_A, h_B, h_C, n);
MatVecMul_GPU_End = chrono::system_clock::now();
cout << "MatVecMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatVecMul_GPU_End - MatVecMul_GPU_Start).count() << "ns." << endl;
//Print MatVecMul
for (int i = 0; i < n; i++) {
cout << h_C[i] << " ";
}
cout << endl;
//Free
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
19,117 | #include "includes.h"
// Demonstration kernel with a deliberate data race: EVERY thread re-initializes
// the whole x and y arrays while other threads may already be applying the
// SAXPY update, so results are nondeterministic (intentional, per the comment
// below). Note the loop variable shadows the outer thread index `i`.
__global__ void kernel0(int n, float a, float *x, float *y){
int i = blockIdx.x*blockDim.x + threadIdx.x;
//comment out this for-loop and uncomment the code in the main function for getting correct results
for (int i = 0; i < n; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
if (i < n){
y[i] = a*x[i] + y[i];
}
} |
19,118 | #include "includes.h"
// In-place per-row/per-column calibration:
//   dst[r][c] = beta*dst[r][c] + alpha*dst[r][c]*rowScale(r)*colScale(c)
// where zero scale entries fall back to 1.0 (rows, sA) / 255.0 (columns, sB).
// Launch: one block per row (blockIdx.x), `threads` threads striding the row.
__global__ void calibrate_fix2float(float * dst, const float* sA, const float* sB, float alpha, float beta, int height, int width, int threads) {
int ri = blockIdx.x; // row handled by this block
int tid = threadIdx.x;
int loop = (width / threads) + ((width % threads == 0) ? 0 : 1); // ceil(width/threads)
float rscale = (sA[ri] == 0.0f) ? 1.0f : sA[ri];
float * data = dst + width * ri; // start of this block's row
int idx = 0;
for (int i = 0; i < loop; ++i) {
if(idx + tid < width){
float temp = data[idx + tid];
float cscale = (sB[idx + tid] == 0.0f) ? 255.0f : sB[idx + tid];
data[idx + tid] = beta * temp + alpha * temp * rscale * cscale;
}
idx += threads;
}
} |
19,119 | #include <stdio.h>
#include <stdlib.h>
__constant__ unsigned long long fatorial[20] = {1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800, 39916800, 479001600, 6227020800, 87178291200, 1307674368000, 20922789888000, 355687428096000, 6402373705728000, 121645100408832000};
/*
__device__
unsigned long long fatorial(int n){
unsigned long long result = 1;
int i;
for(i = n; i > 1; i--){
result *= i;
}
return result;
}*/
// Moves the element at position `pos` to the front of dest, shifting
// dest[0..pos-1] one slot to the right (a rotate-right of the prefix).
__device__
void shiftElement(char* dest, int pos){
char temp = dest[pos];
int i;
for(i = 0; i < pos; i++){
dest[pos-i] = dest[pos-i-1];
}
dest[0] = temp;
}
// Writes into dest the permutation of {4, 5, ..., n+3} whose lexicographic
// rank is `index` (factorial number system decoding).
__device__
void getSequenceLexicographically(char* dest, int n, unsigned long long index){
//Create the base vector of length n
int i;
for(i = 0; i < n; i++)
dest[i] = i+4; //Values start at 4 (1..3 are placed separately by getSequence)
//Walk the vector position by position
for(i = 0; i < n-1; i++){
//Number of permutations that keep the current first element fixed
unsigned long long fat = fatorial[n-i-1];
//How many times this position can be advanced
int num_movimentos = index/fat;
if(num_movimentos > 0){
shiftElement(dest, num_movimentos);
//Subtract the part of the index already consumed
index -= num_movimentos*fat;
}
dest++;
}
}
// Decodes a permutation index into a sequence: markers 2 and 3 are positioned
// from index / (n-3)!, marker 1 is fixed at the front, and the remaining n-3
// slots are filled with the lexicographic permutation of {4..n} given by the
// residual index.
__device__
void getSequence(char* dest, int n, unsigned long long index){
unsigned int numDeslocamentos2e3 = index/fatorial[n-3]; // combined shift count of markers 2 and 3
unsigned int indexResto = index%fatorial[n-3]; // rank of the remaining elements
unsigned int pos_num2 = 1;
unsigned int pos_num3;
int i;
// Consume shifts: advancing marker 2 by one position absorbs (n-2-i) shifts;
// whatever remains places marker 3 relative to marker 2.
for(i = 0; numDeslocamentos2e3; i++){
if(numDeslocamentos2e3 >= (n-2-i)){
pos_num2++;
numDeslocamentos2e3 -= (n-2-i);
}
else{
pos_num3 = pos_num2 + 1 + numDeslocamentos2e3;
break;
}
}
// No shifts left (or none to begin with): 3 sits right after 2.
if(numDeslocamentos2e3 == 0){
pos_num3 = pos_num2+1;
}
getSequenceLexicographically(dest+3, n-3, indexResto);
dest[0] = (char) 1;
// Slide the lexicographic tail left to open gaps for markers 2 and 3.
for(i = 1; i < pos_num2; i++){
dest[i] = dest[i+2];
}
dest[pos_num2] = (char) 2;
for(i = pos_num2+1; i < pos_num3; i++){
dest[i] = dest[i+1];
}
dest[pos_num3] = (char) 3;
}
// Returns how many of the n values in vet are strictly smaller than num.
__device__
int qtdMenores(char* vet, int num, int n){
int qtd = 0;
int i;
for(i = 0; i < n; i++){
if(vet[i] < num)
qtd++;
}
return qtd;
}
// Lexicographic rank of the permutation vet[0..n-1]: for each position, count
// the smaller elements to its right and weight by the factorial of the suffix
// length (inverse of getSequenceLexicographically).
__device__
unsigned long long getIndexLexicographically(char* vet, int n){
unsigned long long index = 0;
int i;
for(i = 0; i < n-1; i++){
index += qtdMenores(vet+i+1, vet[i], n-i-1)*fatorial[n-i-1];
}
return index;
}
// Inverse of getSequence: recovers the permutation index from vet[0..n-1].
// WARNING: compacts vet in place (removes markers 2 and 3) while computing.
__device__
unsigned long long getIndex(char* vet, int n){
unsigned long long index = 0;
int i;
int pos_num2 = -1, pos_num3 = -1;
// Accumulate the index contribution from the positions of markers 2 and 3 only.
for(i = 1; i < n; i++){
if(pos_num2 == -1){
if(vet[i] == 2){
pos_num2 = i;
}
else{
index += (n-i-1)*fatorial[n-3];
}
}
else{
if(vet[i] == 3){
pos_num3 = i;
break;
}
else{
index += fatorial[n-3];
}
}
}
// Accumulate the contribution of the remaining elements: drop markers 2 and 3,
// then rank the leftover sequence lexicographically.
int pos = 0;
for(i = 1; i < n; i++){
if(i != pos_num2 && i != pos_num3){
vet[pos] = vet[i];
pos++;
}
}
index += getIndexLexicographically(vet, n-3);
return index;
} |
19,120 | #include <iostream>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
#define CUDA_CALL( call ) \
{ \
cudaError_t err = call; \
if ( cudaSuccess != err) \
fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));\
}
// Device-side print of row i of a row-major num_rows x num_cols size_t matrix.
// Intended for a <<<1,1>>> launch: no thread indexing is used.
__global__ void printLinearVector_GPU(size_t* x, size_t i, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++ )
printf("%lu ", x[j+i*num_cols]);
printf("\n");
}
// Prints the whole device matrix row by row, synchronizing after each row so
// the device printf output is flushed in order.
__host__ void printLinearVector(size_t* x, size_t num_rows, size_t num_cols)
{
for(int i = 0 ; i < num_rows ; i++ )
{
printLinearVector_GPU<<<1,1>>>(x, i, num_rows, num_cols);
cudaDeviceSynchronize();
}
}
// Determines 1-dimensional CUDA block and grid sizes based on the number of rows N.
// Always uses 1024-thread blocks; the grid is exactly ceil(N / 1024) blocks
// (at least one), and kernels still guard the tail with an id < N check.
// Fix: the original computed (int)ceil(N/blockDim.x)+1 — N/blockDim.x is
// integer division, so ceil() was a no-op and the +1 launched a surplus block.
__host__
void calculateDimensions(size_t N, dim3 &gridDim, dim3 &blockDim)
{
    blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1;
    gridDim.x = (N == 0) ? 1 : (unsigned int)((N + blockDim.x - 1) / blockDim.x);
    gridDim.y = 1; gridDim.z = 1;
}
/// r = A^T * x
/// NOTE: This kernel should be run with A's number of rows as the number of threads
// A is a transposed ELLPACK matrix: entry n of row id lives at [id + n*num_rows].
// Each thread scatters its row's contributions into r via atomicAdd, since
// several rows can hit the same output entry. atomicAdd(double*) requires SM60+.
// NOTE(review): r is assumed to be zero-initialized by the caller — confirm.
__global__
void ApplyTransposed_GPU_(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
for ( int n = 0; n < max_row_size; n++ )
{
int col = index [ id + n*num_rows ];
double val = value [ id + n*num_rows ];
atomicAdd( &r[col], val*x[id] );
}
}
}
// Ax = r
// Gathers row `id` of the transposed ELLPACK matrix (entry n at
// [id + n*num_rows]) and dots it with x; one thread per output row.
__global__ void Apply_GPU_ (
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
sum += value[offset] * x[index[offset]];
}
r[id] = sum;
}
}
// Prints "index value" for every entry of x; one thread per entry.
// Output order across threads is unspecified.
__global__
void printVector_GPU(double* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
printf("%d %e\n", id, x[id]);
}
// returns value of a transposed ELLPack matrix A at (row,col)
// Linear scan of the row's max_row_size slots; 0.0 if (row,col) is not stored.
__device__
double valueAt_(size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows)
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + row] == col)
return vValue[k * num_rows + row];
}
return 0.0;
}
// Prints a dense view of a transposed ELLPACK matrix.
// Single-thread kernel (run with <<<1,1>>>): O(rows*cols*max_row_size) lookups.
__global__
void printELL_GPU_(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int i = 0 ; i < num_rows ; i++)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%f ", valueAt_(i, j, value, index, max_row_size, num_rows) );
printf("\n");
}
}
// adds the value to a transposed ELLPack matrix A at (row,col)
// Scans column `col`'s slots for an existing entry with row index `row` and
// atomically accumulates into it; silently does nothing if the slot is absent.
__device__
void atomicAddAt_( size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows, double value )
{
for(size_t k = 0; k < max_row_size; ++k)
{
// printf("%d\n", (k * num_rows + y) );
if(vIndex[k * num_rows + col] == row)
{
atomicAdd( &vValue[k * num_rows + col] , value );
// vValue[k * num_rows + col] += value;
// printf("%f \n", vValue[k * num_rows + y]);
k = max_row_size; // to exit for loop
}
}
}
// A_coarse = R * A_fine * P
// Galerkin triple product: accumulates P^T * A * P into the coarse ELLPACK
// matrix (value_, index_) via atomicAddAt_. One thread per fine row `id`;
// the id==0 printf lines are debug tracing.
// NOTE(review): inner P lookups use num_rows as P's leading dimension in both
// loops — confirm P is stored with num_rows (fine) rows.
__global__ void PTAP( double* value, size_t* index, size_t max_row_size, size_t num_rows,
double* value_, size_t* index_, size_t max_row_size_, size_t num_rows_,
double* p_value, size_t* p_index, size_t p_max_row_size,
size_t lev)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if( id < num_rows )
{
for ( int i_ = 0 ; i_ < p_max_row_size ; i_++ )
{
size_t i = p_index[id + i_*num_rows];
double P_ki = p_value[id + i_*num_rows];
if(id==0) printf("i = %lu, P_ki = %f\n", i, P_ki);
// if ( id == 1) printf("%f\n", P_ki);
for( int l_ = 0 ; l_ < max_row_size ; l_++ )
{
size_t l = index[id + l_*num_rows];
double A_kl = value[id + l_*num_rows];
double P_ki_A_kl = P_ki * A_kl;
if(id==0) printf("l = %lu, A_kl = %f\n", l, A_kl);
if(id==0) printf("P_ki_A_kl = %f\n", P_ki_A_kl);
for( int j_ = 0 ; j_ < p_max_row_size ; j_++ )
{
size_t j = p_index[l + j_*num_rows];
if( j >= num_rows ) break;
double P_lj = p_value[l + j_*num_rows];
if(id==0) printf("j = %lu, P_lj = %f\n", j, P_lj);
double P_ki_A_kl_P_lj = P_ki_A_kl * P_lj;
if(id==0) printf("PAP(%lu,%lu) = %f\n", i,j,P_ki_A_kl_P_lj);
if(P_ki_A_kl_P_lj != 0.0)
atomicAddAt_( j, i, value_, index_, max_row_size_, num_rows_, P_ki_A_kl_P_lj );
}
}
}
// atomicAddAt_( 0, 0, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 1, 0, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 0, 2, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 1, 0, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 1, 1, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 1, 2, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 2, 0, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 2, 1, value_, index_, max_row_size_, num_rows_, 10 );
// atomicAddAt_( 2, 2, value_, index_, max_row_size_, num_rows_, 10 );
}
}
// Au = b
// Demo driver: builds an N x N identity-like ELLPACK matrix (one unit entry
// per row, on the diagonal), computes b = A^T * u for u = 1 on the GPU, and
// prints b (expected: all ones).
int main()
{
size_t N = 100;
// A matrix
vector<double> A_value( N, 1);
vector<size_t> A_index( N );
size_t mrs = 1; // max_row_size: one stored entry per row
for ( int i = 0 ; i < N ; i++ )
A_index[i] = i;
double* d_A_value;
size_t* d_A_index;
CUDA_CALL( cudaMalloc((void**)&d_A_value, sizeof(double) * N ) );
CUDA_CALL( cudaMalloc((void**)&d_A_index, sizeof(size_t) * N ) );
CUDA_CALL( cudaMemcpy(d_A_value, &A_value[0], sizeof(double) * N, cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMemcpy(d_A_index, &A_index[0], sizeof(size_t) * N, cudaMemcpyHostToDevice) );
// vectors
vector<double> u(N,1);
double* d_b;
double* d_u;
CUDA_CALL( cudaMalloc((void**)&d_b, sizeof(double) * N ) );
CUDA_CALL( cudaMalloc((void**)&d_u, sizeof(double) * N ) );
CUDA_CALL( cudaMemcpy(d_u, &u[0], sizeof(double) * N, cudaMemcpyHostToDevice) );
// b = (A^T) * u
// NOTE(review): d_b is never zero-initialized before the atomicAdd scatter —
// confirm whether a cudaMemset(d_b, 0, ...) is needed here.
ApplyTransposed_GPU_<<<1,N>>>( N, mrs, d_A_value, d_A_index, d_u, d_b );
cudaDeviceSynchronize();
printVector_GPU<<<1,N>>>( d_b, N );
cudaDeviceSynchronize();
}
// // size_t R_mrs = 2;
// // size_t A_mrs = 2;
// // size_t P_mrs = 2;
// // vector<double> A_value_ = { 1,1,1,2,2,2,3,3,3};
// vector<size_t> A_index_ = { 0,0,0,1,1,1,2,2,2};
// vector<double> R_value = { 1, 3, 2, 2, 1, 4};
// vector<size_t> R_index = { 0, 3, 1, 2, 1, 3};
// vector<double> P_value = { 1, 2, 2, 3, 0, 1, 0, 4};
// vector<size_t> P_index = { 0,1,1,0,3,2,3,2};
// // vector<double> A_value = { 1, 5, 1, 2, 2, 1, 1, 0};
// // vector<size_t> A_index = { 0, 2, 1, 3, 1, 2, 3, 4};
// // vector<double> P_value = { 1, 0, 2, 1, 2, 0, 3, 4};
// // vector<size_t> P_index = { 0, 3, 1, 2, 1, 3, 0, 2};
// double* d_A_value;
// double* d_A_value_;
// double* d_R_value;
// double* d_P_value;
// size_t* d_A_index;
// size_t* d_A_index_;
// size_t* d_R_index;
// size_t* d_P_index;
// CUDA_CALL( cudaMalloc((void**)&d_A_value, sizeof(double) * num_rows[1] * A_mrs ) );
// CUDA_CALL( cudaMalloc((void**)&d_A_value_, sizeof(double) * num_rows[0] * num_rows[0] ) );
// CUDA_CALL( cudaMalloc((void**)&d_R_value, sizeof(double) * num_rows[0] * R_mrs ) );
// CUDA_CALL( cudaMalloc((void**)&d_P_value, sizeof(double) * num_rows[1] * P_mrs ) );
// CUDA_CALL( cudaMalloc((void**)&d_A_index, sizeof(size_t) * num_rows[1] * A_mrs ) );
// CUDA_CALL( cudaMalloc((void**)&d_A_index_, sizeof(size_t) * num_rows[0] * num_rows[0] ) );
// CUDA_CALL( cudaMalloc((void**)&d_R_index, sizeof(size_t) * num_rows[0] * R_mrs ) );
// CUDA_CALL( cudaMalloc((void**)&d_P_index, sizeof(size_t) * num_rows[1] * P_mrs ) );
// CUDA_CALL( cudaMemcpy(d_A_value, &A_value[0], sizeof(double) * num_rows[1] * A_mrs, cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_A_value_, &A_value_[0], sizeof(double) * num_rows[0] * num_rows[0], cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_R_value, &R_value[0], sizeof(double) * num_rows[0] * R_mrs, cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_P_value, &P_value[0], sizeof(double) * num_rows[1] * P_mrs, cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_A_index, &A_index[0], sizeof(size_t) * num_rows[1] * A_mrs, cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_A_index_, &A_index_[0], sizeof(size_t) * num_rows[0] * num_rows[0], cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_R_index, &R_index[0], sizeof(size_t) * num_rows[0] * R_mrs, cudaMemcpyHostToDevice) );
// CUDA_CALL( cudaMemcpy(d_P_index, &P_index[0], sizeof(size_t) * num_rows[1] * P_mrs, cudaMemcpyHostToDevice) );
// PTAP<<<1,4>>>(d_A_value, d_A_index, A_mrs, num_rows[1], d_A_value_, d_A_index_, 3, num_rows[0], d_P_value, d_P_index, P_mrs, 0);
// cudaDeviceSynchronize();
// // printELL_GPU_<<<1,1>>>(d_A_value, d_A_index, A_mrs, num_rows[1], num_rows[1]);
// // printELL_GPU_<<<1,1>>>(d_A_value_, d_A_index_, 3, num_rows[0], num_rows[0]);
// // printELL_GPU_<<<1,1>>>(d_P_value, d_P_index, P_mrs, num_rows[1], num_rows[0]);
// // printLinearVector(d_A_index_, 3, 3);
// cudaDeviceSynchronize(); |
19,121 | #include<iostream>
#include<cuda.h>
using namespace std;
// Writes threadIdx.x + blockIdx.x into each thread's slot of data.
// Fix: the per-block stride was hard-coded to 8; using blockDim.x keeps the
// kernel correct for any block size (identical behavior for the existing
// <<<2, 8>>> launch in main).
__global__ void kernel(int *data)
{
    data[threadIdx.x + blockIdx.x * blockDim.x] = threadIdx.x + blockIdx.x;
}
// Launches `kernel` on 2 blocks of 8 threads and prints the 16 results.
int main(){
const int numElem = 16;
int hostArray[numElem], *dArray;
//
cudaMalloc ( (void**) &dArray, sizeof(int) * numElem );
cudaMemset (dArray, 0, numElem * sizeof (int));
kernel <<< 2, 8 >>>( dArray);
// Blocking copy; also synchronizes with the kernel launch above.
cudaMemcpy(&hostArray, dArray, sizeof (int) * numElem , cudaMemcpyDeviceToHost);
for (int i = 0 ; i < numElem; i++)
cout << hostArray[i] << endl;
cudaFree(dArray);
return 0;
}
|
19,122 | #include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// For each (spike, template) pair, computes the cost
//   Ci = mu_b^2 + mus_s^2 - 2*mus_s*mu_b*<Ws_spike, W_template>
// restricted to pairs whose channel matches (iMatch). Launch: one block per
// template (bid); threads stride over spikes. Results go to
// cmax[spike + template*Nspikes]; non-matching entries are left untouched.
__global__ void computeCost(const double *Params, const float *Ws, const float *mus, const float *W, const float *mu, const bool *iMatch, const int *iC, const int *Wh, float *cmax){
int j, tid, bid, Nspikes, my_chan, this_chan, Nchan, NrankPC, NchanNear, Nthreads, k;
float xsum = 0.0f, Ci;
// Unpack scalar sizes from the Params vector.
Nspikes = (int) Params[0];
Nchan = (int) Params[7];
NrankPC = (int) Params[1];
NchanNear = (int) Params[6];
Nthreads = blockDim.x;
tid = threadIdx.x;
bid = blockIdx.x;
while(tid<Nspikes){
my_chan = Wh[tid];
if (iMatch[my_chan + bid*Nchan]){
xsum = 0.0f;
// Inner product over nearby channels and PC ranks.
for (k=0;k<NchanNear;k++){
this_chan = iC[k + NchanNear * my_chan];
for (j=0;j<NrankPC;j++)
xsum += Ws[j + NrankPC*k + NrankPC*NchanNear * tid] *
W[j + NrankPC*this_chan + NrankPC*Nchan * bid];
}
Ci = mu[bid]*mu[bid] + mus[tid]*mus[tid] -2*mus[tid]*mu[bid]*xsum;
cmax[tid + bid*Nspikes] = Ci;
}
tid+= Nthreads;
}
} |
19,123 | #include "cuda.h"
#include "math_constants.h"
#include "cuda_runtime.h"
// Fills data[0..size) with amp * sin(2*pi*x / (sr/freq) + ip), one sample per
// thread (amp: amplitude, freq: signal frequency, ip: initial phase, sr: sample rate).
// Fix: the 2.0 double literal forced the whole expression through double
// precision on the device; 2.0f keeps the math in float as intended.
__global__ void runSinWaveKernel(float *data, int size,
    float amp, float freq, float ip, int sr) // tt time interval is not needed
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (x >= size) return;
    float sampleInterval = sr / freq; // Sample rate / signal freq
    data[x] = amp * sinf((2.0f * CUDART_PI_F * (float)x) / sampleInterval + ip);
}
// Fills data[0..size) with amp * cos(2*pi*x / (sr/freq) + ip), one sample per
// thread. Same fix as the sine kernel: 2.0f instead of the double literal 2.0,
// keeping the expression in single precision.
__global__ void runCosWaveKernel(float *data, int size,
    float amp, float freq, float ip, int sr)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (x >= size) return;
    float sampleInterval = sr / freq; // Sample rate / signal freq
    data[x] = amp * cosf((2.0f * CUDART_PI_F * (float)x) / sampleInterval + ip);
}
// Host wrapper: launches runSinWaveKernel with a fixed 1024x1024 configuration
// (over-provisioned; the kernel guards x >= size), blocks until completion,
// and reports whether the launch/execution succeeded.
bool SinWaveKernel(float *data, int size,
float amp, float freq, float ip, int sr)
{
runSinWaveKernel<<<1024, 1024 >>> (data, size, amp, freq, ip, sr);
cudaDeviceSynchronize();
return cudaPeekAtLastError() == cudaSuccess;
}
// Host wrapper: launches runCosWaveKernel with a fixed 1024x1024 configuration
// (over-provisioned; the kernel guards x >= size), blocks until completion,
// and reports whether the launch/execution succeeded.
bool CosWaveKernel(float *data, int size,
float amp, float freq, float ip, int sr)
{
runCosWaveKernel<<<1024, 1024>>> (data, size, amp, freq, ip, sr);
cudaDeviceSynchronize();
return cudaPeekAtLastError() == cudaSuccess;
}
|
19,124 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Minimal kernel: each launched thread prints a greeting from the device.
__global__ void hello() {
printf("Hello CUDA from GPU!!!\n");
}
// Queries device 0 properties, launches the hello kernel once, and prints
// from both host and device.
int main() {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
hello<<<1, 1>>>();
// Required so the device-side printf output is flushed before exit.
cudaDeviceSynchronize();
printf("Hello CPU\n");
printf("Device name: %s", prop.name);
return 0;
} // nvcc -arch=sm_35 hello.cu -o hello
|
19,125 | #include "includes.h"
// Rounds each of the first `size` input elements to the nearest integral
// float. The index flattens a 2D grid of 1D blocks (full rows of gridDim.x
// blocks, then the block offset within the row, then the thread).
__global__ void RoundKernel(float* input, float* output, int size)
{
int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
if(id < size)
{
output[id] = round(input[id]);
}
} |
19,126 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#define BLOCK_WIDTH 16
#define TILE_WIDTH BLOCK_WIDTH
extern "C" void gpu_mat_mul(float* h_M, float* h_N, float* h_P, int m, int p, int n);
// Tiled matrix multiply P = M x N (M: m x p, N: p x n, P: m x n, row-major).
// Each block computes one TILE_WIDTH x TILE_WIDTH tile of P, staging tiles of
// M and N through shared memory. phase_num over-counts by one when TILE_WIDTH
// divides p exactly; the zero-padding of Mds/Nds makes the extra phase a no-op.
__global__
void gpu_mat_mul_kernel(float* M, float* N, float* P, int m, int p, int n){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the P element to work on
// Each thread works on an element of P
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float sum = 0;
int phase_num = p/TILE_WIDTH + 1;
// Each thread loads 'Row'th row of M and 'Col'th column of N
for (int ph = 0; ph < phase_num; ph++) {
// Collaboratively load data into shared memory
//Mds[ty][tx] = M[Row * width + ph * TILE_WIDTH + tx];
//Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * width + Col];
// Out-of-range loads store 0 so the partial tile contributes nothing.
if ((ph * TILE_WIDTH + tx) < p && Row < m) {
Mds[ty][tx] = M[Row * p + ph * TILE_WIDTH + tx];
}
else {
Mds[ty][tx] = 0;
}
if ((ph * TILE_WIDTH + ty) < p && Col < n) {
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * n + Col];
}
else {
Nds[ty][tx] = 0;
}
__syncthreads();
if (Row < m && Col < n) {
for (int k = 0; k < TILE_WIDTH; k++) {
sum += Mds[ty][k] * Nds[k][tx];
}
}
// Barrier before the next phase overwrites the shared tiles.
__syncthreads();
}
if (Row < m && Col < n) {
P[Row * n + Col] = sum;
}
}
// Host wrapper: copies M and N to the device, runs the tiled multiply, times
// the kernel with CUDA events, copies P back, and prints the launch geometry
// and kernel time.
void gpu_mat_mul(float* h_M, float* h_N, float* h_P, int m, int p, int n) {
float *d_M, *d_N, *d_P;
size_t size_of_float = sizeof(float);
size_t size_M = m * p * size_of_float;
size_t size_N = p * n * size_of_float;
size_t size_P = m * n * size_of_float;
cudaMalloc((void**)&d_M, size_M);
cudaMalloc((void**)&d_N, size_N);
cudaMalloc((void**)&d_P, size_P);
cudaMemcpy(d_M, h_M, size_M, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, size_N, cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float elapsed_time = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// +1 block in each dimension covers the partial tile at the edges.
dim3 grid_dim(n/BLOCK_WIDTH + 1, m/BLOCK_WIDTH + 1, 1);
dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1);
gpu_mat_mul_kernel<<<grid_dim, block_dim>>>(d_M, d_N, d_P, m, p, n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(h_P, d_P, size_P, cudaMemcpyDeviceToHost);
// Free device memory for M, N, P
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf(" grid dim: %d, %d, %d.\n", grid_dim.x, grid_dim.y, grid_dim.z);
printf(" block dim: %d, %d, %d.\n", block_dim.x, block_dim.y, block_dim.z);
printf(" kernel time: %.5f sec\n", elapsed_time / 1000);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
|
19,127 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Element-wise vector add: dC = dA + dB, one element per thread.
// Fix: the guard was `index > size`, which let the thread with index == size
// read/write one element past the end of the arrays; >= is the correct bound.
__global__ void myKernel(double* dA, double* dB, double* dC, int size) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= size)
        return;
    dC[index] = dA[index] + dB[index];
}
// Reads `len` whitespace-separated doubles from `filename` into a malloc'd
// array (caller frees). Exits with a diagnostic if the file cannot be opened
// or a value is missing, instead of dereferencing a NULL FILE* / returning
// partially-uninitialized data as the original did.
double* read_array(const char* filename, int len) {
    double *x = (double*) malloc(len * sizeof(double));
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "read_array: cannot open %s\n", filename);
        free(x);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < len; i++) {
        if (fscanf(fp, "%lf", &x[i]) != 1) {
            fprintf(stderr, "read_array: failed to read value %d from %s\n", i, filename);
            fclose(fp);
            free(x);
            exit(EXIT_FAILURE);
        }
    }
    fclose(fp);
    return x;
}
// Reads two N-length vectors from inputA.inp/inputB.inp, adds them on the GPU
// using M-thread blocks, and prints N, M, the exclusive (kernel-only) and
// inclusive (alloc+copy+kernel+copy) timings, and the last element of C.
// NOTE(review): refC is computed but never compared against hC, and hA/hB/hC
// device buffers aside, hC itself is never freed — possible oversights.
int main(int argc, char *argv[]) {
if (argc != 3) {
printf("Invalid argument Usage: ./problem3 N M");
return -1;
}
const int N = atoi(argv[1]);
const int M = atoi(argv[2]);
//defining variables for timing
cudaEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventCreate(&startEvent_exc);
cudaEventCreate(&stopEvent_exc);
float elapsedTime_inc, elapsedTime_exc;
double *hA = read_array("inputA.inp", N);
double *hB = read_array("inputB.inp", N);
double *hC = (double*) malloc(N * sizeof(double));
double *refC = (double*) malloc(N * sizeof(double)); // Used to verify functional correctness
for (int i = 0; i < N; i++)
refC[i] = hA[i] + hB[i];
cudaEventRecord(startEvent_inc, 0); // starting timing for inclusive
double *dA, *dB, *dC;
cudaMalloc((void**) &dA, sizeof(double) * N);
cudaMalloc((void**) &dB, sizeof(double) * N);
cudaMalloc((void**) &dC, sizeof(double) * N);
cudaMemcpy(dA, hA, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaEventRecord(startEvent_exc, 0); // staring timing for exclusive
myKernel<<<N/M+1,M>>>(dA,dB,dC,N);
cudaEventRecord(stopEvent_exc, 0); // ending timing for exclusive
cudaEventSynchronize(stopEvent_exc);
cudaEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc);
cudaMemcpy(hC, dC, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent_inc, 0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
printf("%d\n%d\n%.3f\n%.3f\n%.3g\n", N, M, elapsedTime_exc, elapsedTime_inc, hC[N - 1]);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
free(refC);
free(hB);
free(hA);
return 0;
}
|
19,128 | #include "includes.h"
// Intentionally empty kernel (useful e.g. for measuring bare launch overhead).
__global__ void null_kernel() {
}; |
19,129 | #include<bits/stdc++.h>
#include<cuda.h>
using namespace std;
#define CEIL(a,b) ((a-1)/b+1)
#define N 1024
// Element-wise vector addition d_c = d_a + d_b, one element per thread,
// with a guard for the final partially-filled block.
__global__ void sum(float* d_a, float* d_b, float* d_c, int size)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= size)
        return;
    d_c[i] = d_a[i] + d_b[i];
}
// Returns true iff c[i] == a[i] + b[i] (exact float equality) for all i.
bool verify(float a[], float b[], float c[], int size)
{
    bool ok = true;
    for (int i = 0; ok && i < size; i++)
        ok = (c[i] == a[i] + b[i]);
    return ok;
}
// Driver: reads an array size from stdin, fills two random host vectors,
// adds them on the GPU, and verifies the result on the host.
int main()
{
    int size;
    cout<<"enter array size : ";
    cin>>size;
    // Guard against non-numeric / non-positive input (previously undefined
    // behavior: VLAs of garbage or negative length).
    if (!cin || size <= 0) {
        cout<<"Incorrect Result";
        return 1;
    }
    int bytes=size*sizeof(float);
    // Heap-backed vectors replace the original variable-length stack arrays,
    // which overflow the stack for large user-supplied sizes.
    vector<float> h_a(size), h_b(size), h_c(size);
    for(int i=0; i<size; i++)
    {
        h_a[i]=rand()%1000;
        h_b[i]=rand()%1000;
    }
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
    sum<<<CEIL(size,N), N>>>(d_a, d_b, d_c, size);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
    if(verify(h_a.data(), h_b.data(), h_c.data(), size))
        cout<<"Result is Correct";
    else
        cout<<"Incorrect Result";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
19,130 | #include <thrust/reduce.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__constant__ double PI = 3.141592653589;
// Computes a likelihood-weighted score C[idx] for every (keypoint, pose-vector)
// candidate attached to each centroid element in [start, end). The observed
// offset (xt,yt,zt) is compared against the orientation-rotated expected vector,
// the difference is normalized by the expected magnitude, and scored under a
// Gaussian of width a_th*(1+weight). Launch geometry: threads indexed along y only.
__global__ void bayesianKernel(float *nn,float *kp, float *pv, float *tp, float *C, int start, int end, int comp, float *weights, float *mags, float a_th, int *ppCentroid, int* startppCentroid, float *ppCentroidData){
//I think I only need row
int inner_ele = blockIdx.y*blockDim.y+threadIdx.y; //This goes 0-2048
int actual_ele=inner_ele + start; //This goes 0-2048 for now, could get larger
//Get the actual_ele neighbour and compute vectors and stuff
float xt= nn[actual_ele*comp+0]-(tp[0]+kp[actual_ele*3+0]);
float yt= nn[actual_ele*comp+1]-(tp[1]+kp[actual_ele*3+1]);
float zt= nn[actual_ele*comp+2]-(tp[2]+kp[actual_ele*3+2]);
// Iterate over every candidate attached to this centroid element.
for (int i=0;i<ppCentroid[actual_ele];i++)
{
int idx=startppCentroid[actual_ele]+i; //0-2969 for now, could get larger
//int aff_id=ppCentroidData[idx*comp+0];
int or_id=ppCentroidData[idx*comp+1];
int pv_id=ppCentroidData[idx*comp+2];
//int kp_id=ppCentroidData[idx*comp+2];
//int large_idx=aff_id*512+kp_id; //0-2048 => [0-3]*[0-511]
//prev_idx is the id of kp in original set [0-16364]
//int prev_idx=4096*aff_id+(kp_id+or_id*512); //[0-16384] "ordered"
//This is some old code
// when Y was pointing upwards
// float angle=or_id*2*PI/8;
// float xpv=pv[idx*3+0]*cos(angle)+pv[idx*3+2]*sin(angle);
// float ypv=pv[idx*3+1];
// float zpv=-pv[idx*3+0]*sin(angle)+pv[idx*3+2]*cos(angle);
// Rotate the expected vector about Z by the discretized orientation (8 bins).
float angle=or_id*2*PI/8;
float xpv=pv[idx*3+0]*cos(angle)-pv[idx*3+1]*sin(angle);
float ypv=sin(angle)*pv[idx*3+0]+cos(angle)*pv[idx*3+1];
float zpv=pv[idx*3+2];
float diff=sqrt(((xt-xpv)*(xt-xpv))+((yt-ypv)*(yt-ypv))+((zt-zpv)*(zt-zpv)))/mags[idx]; //This is the difference as proportion of expected magnitude
//Likelihood is the sample from a normal distribution with mean 0 and std=0.1/weighs;
float sigma=a_th*(1+weights[idx]);
//float sigma=weights[idx];
float likelyhood=expf(-(diff*diff)/(2*sigma*sigma));
//float likelyhood=(1/sigma*(sqrt(2*PI)))*expf(-(diff*diff)/(2*sigma*sigma));
//float prior=1/ppCentroid[actual_ele];
C[idx]=likelyhood*weights[idx];
// if(or_id==0)
// {
// printf("Vid:%d Idx:%d LK:%f C:%f\n",pv_id,idx, likelyhood,weights[idx]*likelyhood);
// //printf("ID: %d ToGo: %d\n",idx,ppCentroid[actual_ele]);
// }
/* if(diff<=mags[idx])
{
float num=xpv*xt+ypv*yt+zpv*zt;
float den=xt*xt+yt*yt+zt*zt;
if((num*num/den)<=(mags[idx]*cos(a_th[aff_id]))^2)
C[idx]= 1;//weights[idx];
else
C[idx]=0;
}*/
}
// if(inner_ele<5)
// {
// printf("\n");
// }
}
// Host-side launcher for bayesianKernel: one thread per element in
// [start, end), laid out along the y dimension only, then a blocking sync.
void bayesian_scores(float *nn,float *kp, float *pv, float *tp, float *C, int start, int end, int comp, float *weights, float *mags, float a_th, int *ppCentroid, int* startppCentroid, float *ppCentroidData){
    const int threadsY = 128;              // block height (from occupancy tables)
    const int elementCount = end - start;
    // NOTE(review): integer division truncates, so the last
    // (elementCount % 128) elements are never scored unless the caller always
    // passes a multiple of 128 — confirm with callers before changing.
    dim3 blockDims(1, threadsY);
    dim3 gridDims(1, elementCount / threadsY);
    bayesianKernel<<<gridDims, blockDims>>>(nn, kp, pv, tp, C, start, end, comp, weights, mags, a_th, ppCentroid, startppCentroid, ppCentroidData);
    cudaDeviceSynchronize();
}
|
19,131 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define warp_size 32
#define Hwarp_size 16
#define N_points 33554432
#define A 0
#define B 15
void checkCUDAError(const char* msg);
// Integrand for the Simpson's-rule demo: e^x * sin(x).
// Compiled for both host and device so CPU reference code can reuse it.
__host__ __device__ inline double f(double x)
{
    return sin(x) * exp(x);
}
// Per-block partial Simpson sum over an equal slice of [a, b].
// Requires: dynamic shared memory of blockDim.x doubles, blockDim.x a power of
// two, and eval = N_points/gridDim.x even so the 2/4 coefficient parity lines
// up identically in every block.  Endpoint corrections are applied later in
// globalReduce.
__global__ void fn_evalCalc(double *fn_eval, double a, double b) // N_points/gridDim.x must be an integer
{
	extern __shared__ double local_array[];
	double step = (b-a)/N_points, sum = 0.0, diff = (b-a)/gridDim.x;
	int eval = N_points/gridDim.x;   // samples handled by this block
	a += blockIdx.x*diff;            // left edge of this block's slice
	// Simpson coefficients: 2 for even k, 4 for odd k.
	for(int k = threadIdx.x; k < eval; k += blockDim.x)
		sum += ((k % 2 == 0) ? 2.0 : 4.0) * f(a + step*k);
	local_array[threadIdx.x] = sum;
	__syncthreads();
	// Block reduction, sequential addressing: avoids the '%' operator and the
	// heavy warp divergence of the original interleaved (tid % 2s == 0) scheme.
	for(int s = blockDim.x/2; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
			local_array[threadIdx.x] += local_array[threadIdx.x + s];
		__syncthreads();
	}
	if(threadIdx.x == 0)
		fn_eval[blockIdx.x] = local_array[0];
}
// Single-block final reduction: sums the per-block partials fn_eval[0..size)
// and applies the Simpson endpoint correction and the step/3 factor, leaving
// the result in fn_eval[0].  Requires blockDim.x >= size, blockDim.x a power
// of two, and dynamic shared memory of blockDim.x doubles.
__global__ void globalReduce(double *fn_eval, double a, double b, int size)
{
	extern __shared__ double local_array[];
	double step = (b-a)/N_points;
	// Zero-fill the tail: the original left slots >= size uninitialized and
	// then summed that garbage whenever blockDim.x > size.
	local_array[threadIdx.x] = (threadIdx.x < size) ? fn_eval[threadIdx.x] : 0.0;
	// This barrier was missing in the original: without it the first reduction
	// step can read a neighbour's slot before that neighbour has written it.
	__syncthreads();
	for(int s = blockDim.x/2; s > 0; s >>= 1)
	{
		if (threadIdx.x < s)
			local_array[threadIdx.x] += local_array[threadIdx.x + s];
		__syncthreads();
	}
	if(threadIdx.x == 0)
		fn_eval[0] = step*(local_array[0] + f(b) - f(a))/3;
}
// Integrates f over [A, B] with a two-stage Simpson reduction on the GPU and
// reports the result plus wall-clock time.
int main( int argc, char** argv)
{
	double sum = 0.0, *d_fn_eval;
	int numBlocks = 128, numThreadsPerBlock = 64; // keep numBlocks within 1024 (single-block final reduce)
	cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
	timeval t;
	double t1, t2;
	// BUG FIX: the original allocated a single double, but fn_evalCalc writes
	// one partial per block — a numBlocks-wide out-of-bounds write.
	cudaMalloc( (void **) &d_fn_eval, numBlocks*sizeof(double) );
	gettimeofday(&t, NULL);
	t1 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
	fn_evalCalc<<< numBlocks, numThreadsPerBlock, numThreadsPerBlock*sizeof(double) >>>(d_fn_eval,A,B);
	globalReduce<<< 1, numBlocks, numBlocks*sizeof(double) >>>(d_fn_eval,A,B,numBlocks);
	cudaDeviceSynchronize();  // replaces deprecated cudaThreadSynchronize
	gettimeofday(&t, NULL);
	t2 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
	checkCUDAError("kernel invocation");
	cudaMemcpy( &sum, d_fn_eval, sizeof(double), cudaMemcpyDeviceToHost );
	checkCUDAError("memcpy");
	printf("%lf~~~TIME : %lf ms\n\n\n",sum,t2-t1);
	cudaFree(d_fn_eval);  // the original leaked this allocation
	return 0;
}
// Abort with a readable message if any prior CUDA call left a sticky error.
void checkCUDAError(const char *msg)
{
	const cudaError_t status = cudaGetLastError();
	if (status == cudaSuccess)
		return;
	fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
	exit(EXIT_FAILURE);
}
|
19,132 | #define DEBUG
#include "Data.cuh"
// Reads an input description from argv[1], queries the flow value, and runs a
// BFS from the sink (T).
int main(int argc, char *argv[])
{
    Data * data = new Data;
    data->Read(argv[1]);      // NOTE(review): argv[1] presence is not checked — confirm callers
    int flow = data->GetFlow();
    (void)flow;               // value unused; call kept in case GetFlow has side effects — confirm
    data->BfsFromT();
    delete data;              // the original leaked this allocation
    return 0;
}
|
19,133 | #include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmultbang"
{
}
// Element-wise binary KL divergence:
//   z[i] = x*log(x/y) + (1-x)*log((1-x)/(1-y))
// using the standard convention 0*log(0) = 0, so x[i] == 0 or x[i] == 1 no
// longer produces NaN (the original evaluated 0*log(0) directly).
// One thread per element with a grid-tail guard.
__global__ void binaryentropy(const int lengthX, const double *x, const double *y, double *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i<lengthX)
    {
        double lhs = (x[i] > 0.0) ? x[i]*log(x[i]/y[i]) : 0.0;
        double rhs = (x[i] < 1.0) ? (1.0-x[i])*log((1.0-x[i])/(1.0-y[i])) : 0.0;
        z[i] = lhs + rhs;
    }
}
19,134 | // includes, system
#include <stdio.h>
#include <assert.h>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
// Memory-copy tutorial: H2D copy into d_a, D2D copy into d_b, read-back and
// verification, then cleanup.  Exits via checkCUDAError on any CUDA failure.
int main( int argc, char** argv)
{
    cudaSetDevice(MYDEVICE);
    // pointer and dimension for host memory
    int n, dimA;
    float *h_a;
    // pointers for device memory
    float *d_a, *d_b;
    // allocate and initialize host memory
    // Bonus: try using cudaMallocHost in place of malloc
    dimA = 8;
    h_a = (float *) malloc(dimA*sizeof(float));
    for (n=0; n<dimA; n++)
    {
        h_a[n] = (float) n;
    }
    // Part 1 of 5: allocate device memory
    size_t memSize = dimA*sizeof(float);
    cudaMalloc((void **)&d_a, memSize);
    cudaMalloc((void **)&d_b, memSize);
    checkCUDAError("cudaMalloc calls");  // fail fast before using the pointers
    // Part 2 of 5: host to device memory copy
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    // Part 3 of 5: device to device memory copy
    cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice);
    // clear host memory so the read-back below is a real check
    for (n=0; n<dimA; n++)
    {
        h_a[n] = 0.f;
    }
    // Part 4 of 5: device to host copy.  BUG FIX: read back d_b, not d_a —
    // reading d_a (as the original did) never exercised the device-to-device
    // copy of Part 3, so the verification below could not catch its failure.
    cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy calls");
    // verify the data on the host is correct
    for (n=0; n<dimA; n++)
    {
        assert(h_a[n] == (float) n);
    }
    // Part 5 of 5: free device memory pointers d_a and d_b
    cudaFree(d_a);
    cudaFree(d_b);
    // Check for any CUDA errors
    checkCUDAError("cudaFree");
    // free host memory pointer h_a
    free(h_a);
    printf("Correct!\n");
    return 0;
}
// Print a labelled message and abort if a CUDA runtime error is pending.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(-1);
}
|
19,135 | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
// Runs one single-precision complex-to-complex inverse FFT on `device_id`.
// dims[0..2] are the sizes (x, y, z), dims[3] is the FFT rank (1-3).
// Input/output are host buffers of dims[0]*dims[1]*dims[2] cufftComplex.
void launch_precision_cuFFT_single(void* inputC, void* output_cuFFT, int device_id, uint64_t* dims)
{
	cudaSetDevice(device_id);
	cufftComplex* dataC;
	const size_t bufferBytes = sizeof(cufftComplex) * dims[0] * dims[1] * dims[2];
	// Check the allocation directly instead of sampling cudaGetLastError later.
	if (cudaMalloc((void**)&dataC, bufferBytes) != cudaSuccess) {
		fprintf(stderr, "Cuda error: Failed to allocate\n");
		return;
	}
	cudaMemcpy(dataC, inputC, bufferBytes, cudaMemcpyHostToDevice);
	if (cudaGetLastError() != cudaSuccess) {
		fprintf(stderr, "Cuda error: Failed to allocate\n");
		cudaFree(dataC);   // the original leaked dataC on this path
		return;
	}
	// Note cuFFT's size order: the slowest-varying dimension goes first.
	cufftHandle planC2C;
	cufftResult planStatus;
	switch (dims[3]) {
	case 1:
		planStatus = cufftPlan1d(&planC2C, dims[0], CUFFT_C2C, 1);
		break;
	case 2:
		planStatus = cufftPlan2d(&planC2C, dims[1], dims[0], CUFFT_C2C);
		break;
	case 3:
		planStatus = cufftPlan3d(&planC2C, dims[2], dims[1], dims[0], CUFFT_C2C);
		break;
	default:
		// The original fell through and used an uninitialized plan handle.
		fprintf(stderr, "Cuda error: unsupported FFT rank %llu\n", (unsigned long long)dims[3]);
		cudaFree(dataC);
		return;
	}
	if (planStatus != CUFFT_SUCCESS) {
		fprintf(stderr, "Cuda error: plan creation failed\n");
		cudaFree(dataC);
		return;
	}
	// In-place inverse transform (the original passed the literal -1 == CUFFT_INVERSE).
	cufftExecC2C(planC2C, dataC, dataC, CUFFT_INVERSE);
	cudaDeviceSynchronize();
	cudaMemcpy(output_cuFFT, dataC, bufferBytes, cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	cufftDestroy(planC2C);
	cudaFree(dataC);
}
|
19,136 | #include <iostream>
#include <vector>
#include <cmath>
#include <random>
#define TPB 32 // tuning this parameter can improve CUDA perf
//control params
#define DIM 3
#define CENTROID_COUNT 8
#define POINTS_COUNT TPB * 40
#define POINTS_RANGE 256
#define ITERS 3
#define NORMAL_DIST false
#define PRINT true
///// UTILS
#define checkCuda(ans) { checkCudaError((ans), __LINE__); }
// Report (but do not abort on) a failing CUDA status, tagged with the call-site line.
void checkCudaError(cudaError_t cudaStatus, int line) {
    if (cudaStatus == cudaSuccess) return;
    fprintf(stderr, "Line %d CUDA Error %d: %s\n", line, cudaStatus, cudaGetErrorString(cudaStatus));
}
// Prints the elapsed wall-clock time between two CLOCK_MONOTONIC samples.
// BUG FIX: the original's nanosecond-borrow branch computed
//   (start.ns - end.ns)/1e6 + (dsec - 1)*1e3
// instead of (dsec - 1)*1e3 + (1e9 - (start.ns - end.ns))/1e6, so any interval
// where end.tv_nsec < start.tv_nsec was reported wrongly.  Signed double
// arithmetic handles both cases uniformly.
void print_timediff(const char *prefix, const struct timespec &start, const
        struct timespec &end) {
    double milliseconds = (end.tv_sec - start.tv_sec) * 1e3
                        + (end.tv_nsec - start.tv_nsec) / 1e6;
    printf("%s: %lf milliseconds\n", prefix, milliseconds);
}
//////////////////////////////////////// CPU ////////////////////////////////////////
// Euclidean distance between points[point] and centroids[centroid] (DIM components).
// BUG FIX: the accumulator was declared int, truncating every squared
// fractional difference before the sqrt and corrupting CPU k-means assignments.
float distanceBetweenTwoPoints(int *points, float *centroids, int point, int centroid) {
    float sum = 0.0f;
    for (int i = 0; i < DIM; i++) {
        float d = (float) points[point * DIM + i] - centroids[centroid * DIM + i];
        sum += d * d;   // d*d also avoids the std::pow(x, 2) call
    }
    return std::sqrt(sum);
}
// Picks CENTROID_COUNT distinct points (sampling without replacement) as the
// initial centroids.  `size` is the total number of scalars (points * DIM).
void randomCentroids(const int *points, float *centroids, int size) {
    std::vector<float> copy(size);
    for (int i = 0; i < size; i++) {
        copy.at(i) = points[i];
    }
    for (int i = 0; i < CENTROID_COUNT; i++) {
        // BUG FIX: the original drew (random() % copy.size()) * DIM — usually
        // far past the end — then spun in a rejection loop whose bound
        // (index + DIM - 1 > copy.size()) still admitted a one-past-the-end
        // read via copy.at(copy.size()).  Draw a point-aligned index directly.
        int index = (int) (random() % (copy.size() / DIM)) * DIM;
        for (int j = 0; j < DIM; j++) {
            centroids[i * DIM + j] = copy.at(index + j);
        }
        // Remove the chosen point so it cannot be selected twice.
        copy.erase(copy.begin() + index, copy.begin() + index + DIM);
    }
}
// Lloyd's k-means on the host, ITERS iterations.
// `size` counts scalars, i.e. number_of_points * DIM.
void kMeansCPU(int *points, int size) {
    const int pointCount = size / DIM;
    // step 0: choose CENTROID_COUNT random points as the initial centroids
    float centroids[DIM * CENTROID_COUNT];
    randomCentroids(points, centroids, size);
    int pointToCentroid[pointCount];
    int iters = 0;
    while (iters < ITERS) {
        // step 1: assign each point to the closest centroid
        for (int i = 0; i < pointCount; i++) {
            float minDist = MAXFLOAT;
            int currentCentroid = 0;   // initialized: the original left it indeterminate
            for (int j = 0; j < CENTROID_COUNT; j++) {
                float dist = distanceBetweenTwoPoints(points, centroids, i, j);
                if (minDist > dist) {
                    minDist = dist;
                    currentCentroid = j;
                }
            }
            pointToCentroid[i] = currentCentroid;
        }
        // step 2: recompute centroids as the mean of their assigned points.
        int countsPerCluster[CENTROID_COUNT] = {};
        int sumPerCluster[CENTROID_COUNT * DIM] = {};
        // CONSISTENCY FIX: iterate over size/DIM points like the rest of this
        // function instead of the hard-coded POINTS_COUNT macro.
        for (int i = 0; i < pointCount; i++) { //point
            int c = pointToCentroid[i];
            countsPerCluster[c] += 1;
            for (int cDim = 0; cDim < DIM; cDim++) {
                sumPerCluster[c * DIM + cDim] += points[i * DIM + cDim];
            }
        }
        // recompute
        for (int i = 0; i < CENTROID_COUNT; i++) {
            // Guard empty clusters: dividing by zero filled the centroid with NaN.
            if (countsPerCluster[i] == 0) continue;
            for (int j = 0; j < DIM; j++) {
                centroids[i * DIM + j] = (float) sumPerCluster[i * DIM + j] / (float) countsPerCluster[i];
            }
        }
        // repeat step 1 and 2 until convergence (fixed iteration count here)
        iters++;
    }
#if PRINT
    std::cout << "POINT-TO-CENTROID:" << std::endl;
    for (int i = 0; i < pointCount; i++) {
        std::cout << pointToCentroid[i] << ",";
    }
    std::cout << std::endl;
    std::cout << "CENTROIDS:" << std::endl;
    for (int i = 0; i < CENTROID_COUNT * DIM; i++) {
        std::cout << centroids[i] << ",";
    }
    std::cout << std::endl;
#endif
}
// Fills `array` with `size` pseudo-random values in [0, range).
// With NORMAL_DIST enabled, the two halves of the array come from two
// normal distributions (two synthetic clusters).
void randomArray(int size, int *array, int range = POINTS_RANGE) {
    std::random_device seedSource{};
    std::mt19937 engine{seedSource()};
    std::normal_distribution<> clusterA{POINTS_RANGE / 2, 2};
    std::normal_distribution<> clusterB{POINTS_RANGE / 5, 2};
    for (int i = 0; i < size; i++) {
#if NORMAL_DIST
        array[i] = (i < size / 2) ? (int) clusterA(engine) % range
                                  : (int) clusterB(engine) % range;
#else
        array[i] = random() % range;
#endif
    }
}
// Dumps `size` scalars, comma separated, followed by a newline.
void printPoints(int size, const int *points) {
    for (size_t i = 0; i < size; i++)
        std::cout << points[i] << ", ";
    printf("\n");
}
//////////////////////////////////////// CUDA sol 1 (NOT OPTIMAL)////////////////////////////////////////
// One thread per point: writes the Euclidean distance from that point to every
// centroid into dists[point * CENTROID_COUNT + centroid].
__global__ void distance(const int *points, float *dists, const float *centroids) {
    const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    //bounds check (grid tail)
    if (idx >= POINTS_COUNT) return;
    for (int currCentroid = 0; currCentroid < CENTROID_COUNT; currCentroid++) {
        float sum = 0;
        for (int currDim = 0; currDim < DIM; currDim++) {
            // d*d instead of the original std::pow(d, 2): no double-precision
            // pow call on the device just to square a float.
            float d = (float) points[idx * DIM + currDim] - centroids[currCentroid * DIM + currDim];
            sum += d * d;
        }
        dists[idx * CENTROID_COUNT + currCentroid] = std::sqrt(sum);
    }
}
// One thread per point: records the index of its nearest centroid in p2c,
// scanning the precomputed distance row for this point.
__global__ void assign(const float *dists, int *p2c) {
    const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= POINTS_COUNT) return;
    int nearest = -1;
    float best = MAXFLOAT;
    for (int c = 0; c < CENTROID_COUNT; c++) {
        const float d = dists[idx * CENTROID_COUNT + c];
        if (d < best) {
            best = d;
            nearest = c;
        }
    }
    p2c[idx] = nearest;
}
// One thread per centroid: recomputes the centroid as the mean of the points
// currently assigned to it (O(POINTS_COUNT) scan per centroid).
__global__ void newCentroids(const int *points, const int *p2c, float *centroids) {
    const uint centroidIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (centroidIdx >= CENTROID_COUNT) return;
    int sum[DIM] = {0};
    int count = 0;
    for (int i = 0; i < POINTS_COUNT; i++) {
        if (p2c[i] == centroidIdx) {
            for (int curDim = 0; curDim < DIM; curDim++) {
                sum[curDim] += points[i * DIM + curDim];
            }
            count++;
        }
    }
    // Guard empty clusters: the original divided by zero and wrote NaN.
    if (count == 0) return;
    //compute new centroid
    for (int curDim = 0; curDim < DIM; curDim++) {
        centroids[centroidIdx * DIM + curDim] = (float) sum[curDim] / (float) count;
    }
}
// Straightforward k-means: three kernels per iteration over managed memory.
// Launch config is 1-D with one thread per point (the original used a 2-D
// block and grid whose y dimensions the kernels never read, redundantly
// recomputing every point several times).
void kMeansCUDA(int *points) {
    const int threads = 128;
    const int blocks = (POINTS_COUNT + threads - 1) / threads;  // ceil-div grid
    float *dists;
    float *centroids;
    int *pointToCentroid;
    int *clusterSizes;
    checkCuda(cudaMallocManaged(&dists, CENTROID_COUNT * POINTS_COUNT * sizeof(float)));
    checkCuda(cudaMallocManaged(&centroids, CENTROID_COUNT * DIM * sizeof(float)));
    checkCuda(cudaMallocManaged(&pointToCentroid, POINTS_COUNT * sizeof(int)));
    checkCuda(cudaMallocManaged(&clusterSizes, CENTROID_COUNT * sizeof(int)));
    randomCentroids(points, centroids, POINTS_COUNT * DIM);
    int iter = 0;
    while (iter < ITERS) {
        // for each point calculate the distance to every centroid
        distance<<<blocks, threads>>>(points, dists, centroids);
        checkCuda(cudaDeviceSynchronize());
        // assign the nearest centroid to each point.
        // BUG FIX: the original issued cudaMemset(dists, 0, POINTS_COUNT *
        // sizeof(int)) here — wiping out the first chunk of the distances it
        // had just computed (it apparently meant pointToCentroid).  Neither
        // buffer needs clearing: both are fully overwritten by the kernels.
        assign<<<blocks, threads>>>(dists, pointToCentroid);
        checkCuda(cudaDeviceSynchronize());
        // recompute each centroid (one thread per centroid)
        newCentroids<<<1, CENTROID_COUNT>>>(points, pointToCentroid, centroids);
        checkCuda(cudaDeviceSynchronize());
        iter++;
    }
#if PRINT
    checkCuda(cudaDeviceSynchronize());
    std::cout << "POINT-TO-CENTROID:" << std::endl;
    for (int i = 0; i < POINTS_COUNT; i++) {
        std::cout << pointToCentroid[i] << ",";
    }
    std::cout << std::endl;
    std::cout << "CENTROIDS:" << std::endl;
    for (int i = 0; i < CENTROID_COUNT * DIM; i++) {
        std::cout << centroids[i] << ",";
    }
    std::cout << std::endl;
#endif
    // cleanup
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaFree(centroids)));
    checkCuda(cudaFree(dists));
    checkCuda(cudaFree(pointToCentroid));
    checkCuda(cudaFree(clusterSizes));
}
//////////////////////////////////////// CUDA sol 2 ////////////////////////////////////////
// Squared Euclidean distance between a DIM-vector of ints and one of floats.
// Returns the square (no sqrt) — sufficient for nearest-centroid comparison.
__device__ float distance_squared(const int *points, const float *centroid) {
    float acc = 0.0f;
    for (int d = 0; d < DIM; d++) {
        const float delta = points[d] - centroid[d];
        acc += delta * delta;
    }
    return acc;
}
// One thread per point: finds the closest centroid (by squared distance) and
// stores its id in p2c[idx].
__global__ void distances_calculation(const int *points, int *p2c, const float *centroids) {
    const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= POINTS_COUNT) { return; }
    // Stage this thread's point locally once instead of re-reading global memory.
    int point[DIM] = {};
    for (int d = 0; d < DIM; d++) {
        point[d] = points[idx * DIM + d];
    }
    float min_dist = INFINITY;
    int closest_centroid = 0;
    for (int c = 0; c < CENTROID_COUNT; ++c) {
        float centroid[DIM] = {};
        for (int d = 0; d < DIM; d++) {
            centroid[d] = centroids[c * DIM + d];
        }
        const float dist = distance_squared(point, centroid);
        if (dist < min_dist) {
            min_dist = dist;
            closest_centroid = c;
        }
    }
    // record the winning cluster for this datapoint/thread
    p2c[idx] = closest_centroid;
}
// New centroids accumulated via per-block partial sums in shared memory plus
// one atomicAdd per (centroid, dim) per block, following
// https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf
// Expects `centroids` and `counters` zeroed before launch; they are used as
// global accumulators and divided through at the end.
// FIX: restored `&centroids` where encoding corruption had turned it into
// the HTML entity residue `¢roids` (would not compile).
__global__ void move_centroids(const int *points, const int *p2c, float *centroids, int *counters) {
    const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= POINTS_COUNT) { return; }
    // NOTE(review): this early return precedes __syncthreads(); safe only
    // because POINTS_COUNT is a multiple of TPB so whole blocks exit together.
    const uint s_idx = threadIdx.x;   // thread index within the block
    // Stage this block's points and assignments in shared memory so thread 0
    // can sum them without touching global memory.
    __shared__ int sPoints[TPB * DIM];
    for (int cDim = 0; cDim < DIM; cDim++) {
        sPoints[s_idx * DIM + cDim] = points[idx * DIM + cDim];
    }
    __shared__ int sp2c[TPB];
    sp2c[s_idx] = p2c[idx];
    __syncthreads();
    // Thread 0 folds the block's TPB points into per-centroid partial sums.
    if (s_idx == 0) {
        float blockSums[CENTROID_COUNT * DIM] = {0};
        int blockCounts[CENTROID_COUNT] = {0};
        for (int j = 0; j < blockDim.x; ++j) {
            int cid = sp2c[j];
            for (int cDim = 0; cDim < DIM; cDim++) {
                blockSums[cid * DIM + cDim] += (float) sPoints[j * DIM + cDim];
            }
            blockCounts[cid] += 1;
        }
        // One atomic per (centroid, dim) per block instead of one per point.
        for (int z = 0; z < CENTROID_COUNT; ++z) {
            for (int cDim = 0; cDim < DIM; cDim++) {
                atomicAdd(&centroids[z * DIM + cDim], blockSums[z * DIM + cDim]);
            }
            atomicAdd(&counters[z], blockCounts[z]);
        }
    }
    __syncthreads();
    // Convert accumulated sums into means.
    // NOTE(review): __syncthreads() only synchronizes THIS block — other blocks
    // may still be atomicAdd-ing when this division runs, so the result is
    // racy.  A correct fix moves the division into a second kernel (or uses a
    // cooperative-launch grid sync); left as-is to preserve the call contract.
    if (idx < CENTROID_COUNT) {
        for (int cDim = 0; cDim < DIM; cDim++) {
            centroids[idx * DIM + cDim] = centroids[idx * DIM + cDim] / (float) counters[idx];
        }
    }
}
// k-means using block-level partial sums (move_centroids) instead of a full
// per-centroid scan.  `centroids` and `counters` act as global accumulators
// and must be fully zeroed before every move_centroids launch.
// (Also restores `&centroids`, which encoding corruption had mangled.)
void optimalKMeansCUDA(int *points) {
    float *centroids;
    int *counters;
    int *pointToCentroid;
    checkCuda(cudaMallocManaged(&centroids, CENTROID_COUNT * DIM * sizeof(float)));
    checkCuda(cudaMallocManaged(&counters, CENTROID_COUNT * sizeof(int)));
    checkCuda(cudaMallocManaged(&pointToCentroid, POINTS_COUNT * sizeof(int)));
    randomCentroids(points, centroids, POINTS_COUNT * DIM);
    for (int i = 0; i < ITERS; ++i) {
        distances_calculation<<<(POINTS_COUNT + TPB - 1) / TPB, TPB>>>(points, pointToCentroid, centroids);
        checkCuda(cudaDeviceSynchronize());
        // BUG FIX: the original cleared only CENTROID_COUNT * sizeof(float)
        // bytes of `centroids`, leaving stale sums in (DIM-1)/DIM of the buffer
        // and corrupting every iteration's accumulation.
        cudaMemset(centroids, 0, CENTROID_COUNT * DIM * sizeof(float));
        cudaMemset(counters, 0, CENTROID_COUNT * sizeof(int));
        move_centroids<<<(POINTS_COUNT + TPB - 1) / TPB, TPB>>>(points, pointToCentroid, centroids, counters);
        checkCuda(cudaDeviceSynchronize());
    }
#if PRINT
    checkCuda(cudaDeviceSynchronize());
    std::cout << "POINT-TO-CENTROID:" << std::endl;
    for (int i = 0; i < POINTS_COUNT; i++) {
        std::cout << pointToCentroid[i] << ",";
    }
    std::cout << std::endl;
    std::cout << "CENTROIDS:" << std::endl;
    for (int i = 0; i < CENTROID_COUNT * DIM; i++) {
        std::cout << centroids[i] << ",";
    }
    std::cout << std::endl;
#endif
    //cleanup (the never-used new_centroids buffer from the original was dropped)
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaFree(centroids));
    checkCuda(cudaFree(counters));
    checkCuda(cudaFree(pointToCentroid));
}
// Generates a random point cloud in unified memory and runs all three k-means
// implementations over the same data, timing each with CLOCK_MONOTONIC.
int main() {
    struct timespec begin, finish;
    const int totalScalars = POINTS_COUNT * DIM;
    int *points;
    checkCuda(cudaMallocManaged(&points, totalScalars * sizeof(int)));
    randomArray(totalScalars, points, POINTS_RANGE);
    printf("----CPU SOLUTION----\n");
    clock_gettime(CLOCK_MONOTONIC, &begin);
    kMeansCPU(points, totalScalars);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    print_timediff("CPU time ", begin, finish);
    printf("----CUDA NOT OPTIMAL SOLUTION----\n");
    clock_gettime(CLOCK_MONOTONIC, &begin);
    kMeansCUDA(points);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    print_timediff("CUDA time ", begin, finish);
    printf("----CUDA BETTER SOLUTION----\n");
    clock_gettime(CLOCK_MONOTONIC, &begin);
    optimalKMeansCUDA(points);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    print_timediff("CUDA time ", begin, finish);
#if PRINT
    printPoints(totalScalars, points);
#endif
    checkCuda(cudaFree(points));
}
|
19,137 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
// Shared-memory occupancy microbenchmarks: 53 structurally identical kernels
// that differ only in the size of their static __shared__ allocation
// (1 KB .. 41 KB).  Each thread busy-loops on its own shared-memory byte
// (repeat increments, repeat decrements, repeat increments — net effect
// depends only on `repeat`), so the compiler cannot eliminate the traffic;
// the shared array size is what limits how many blocks can be co-resident
// per SM, which is the quantity being measured.
// NOTE(review): there is no bounds check on threadIdx.x — each kernel must be
// launched with blockDim.x no larger than its shared array size.  All sizes
// stay below the 48 KB default static shared-memory limit.
__global__ void kernel_1(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_2(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_3(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_4(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_5(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_6(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_7(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_8(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_9(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_10(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_11(int repeat) {
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_12(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_13(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_14(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_15(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_16(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_17(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_18(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_19(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_20(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_21(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_22(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_23(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_24(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_25(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_26(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_27(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_28(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_29(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_30(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_31(int repeat) {
__shared__ unsigned char s[5120];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_32(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_33(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_34(int repeat) {
__shared__ unsigned char s[36864];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_35(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_36(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_37(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_38(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_39(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_40(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_41(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_42(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_43(int repeat) {
__shared__ unsigned char s[21504];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_44(int repeat) {
__shared__ unsigned char s[35840];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_45(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_46(int repeat) {
__shared__ unsigned char s[31744];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_47(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_48(int repeat) {
__shared__ unsigned char s[12288];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_49(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_50(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_51(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_52(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_53(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_54(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_55(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_56(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_57(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_58(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_59(int repeat) {
__shared__ unsigned char s[25600];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_60(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_61(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_62(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_63(int repeat) {
__shared__ unsigned char s[27648];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_64(int repeat) {
__shared__ unsigned char s[22528];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_65(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_66(int repeat) {
__shared__ unsigned char s[20480];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_67(int repeat) {
__shared__ unsigned char s[18432];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_68(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
__global__ void kernel_69(int repeat) {
__shared__ unsigned char s[2048];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// --- Auto-generated shared-memory stress kernels (kernel_70 .. kernel_99) ---
// Identical pattern throughout: thread i (= threadIdx.x) zeroes one byte of
// a statically sized __shared__ buffer, then over 45 passes performs
// `repeat` increments, `repeat` decrements, and `repeat` increments of it
// (net s[i] == (45 * repeat) mod 256). Only the buffer size differs per
// kernel, varying the static shared-memory footprint (presumably an
// occupancy sweep — TODO confirm against the harness). The inner loop
// counters shadow the outer `n` intentionally; the loops are the measured
// workload, so statement order must be preserved.
// NOTE(review): s[i] has no bounds check — assumes blockDim.x <= buffer
// size (the visible host code launches 32 threads); verify call sites.
__global__ void kernel_70(int repeat) {
// 19456-byte shared buffer.
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 40960-byte shared buffer.
__global__ void kernel_71(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 1024-byte shared buffer.
__global__ void kernel_72(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 3072-byte shared buffer.
__global__ void kernel_73(int repeat) {
__shared__ unsigned char s[3072];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 34816-byte shared buffer.
__global__ void kernel_74(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 38912-byte shared buffer.
__global__ void kernel_75(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 13312-byte shared buffer.
__global__ void kernel_76(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 41984-byte shared buffer.
__global__ void kernel_77(int repeat) {
__shared__ unsigned char s[41984];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 23552-byte shared buffer.
__global__ void kernel_78(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 26624-byte shared buffer.
__global__ void kernel_79(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 9216-byte shared buffer.
__global__ void kernel_80(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 19456-byte shared buffer.
__global__ void kernel_81(int repeat) {
__shared__ unsigned char s[19456];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 24576-byte shared buffer.
__global__ void kernel_82(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 23552-byte shared buffer.
__global__ void kernel_83(int repeat) {
__shared__ unsigned char s[23552];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 9216-byte shared buffer.
__global__ void kernel_84(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 7168-byte shared buffer.
__global__ void kernel_85(int repeat) {
__shared__ unsigned char s[7168];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 37888-byte shared buffer.
__global__ void kernel_86(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 15360-byte shared buffer.
__global__ void kernel_87(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 28672-byte shared buffer.
__global__ void kernel_88(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 10240-byte shared buffer.
__global__ void kernel_89(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 40960-byte shared buffer.
__global__ void kernel_90(int repeat) {
__shared__ unsigned char s[40960];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 39936-byte shared buffer.
__global__ void kernel_91(int repeat) {
__shared__ unsigned char s[39936];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 37888-byte shared buffer.
__global__ void kernel_92(int repeat) {
__shared__ unsigned char s[37888];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 29696-byte shared buffer.
__global__ void kernel_93(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 14336-byte shared buffer.
__global__ void kernel_94(int repeat) {
__shared__ unsigned char s[14336];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 24576-byte shared buffer.
__global__ void kernel_95(int repeat) {
__shared__ unsigned char s[24576];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 16384-byte shared buffer.
__global__ void kernel_96(int repeat) {
__shared__ unsigned char s[16384];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 38912-byte shared buffer.
__global__ void kernel_97(int repeat) {
__shared__ unsigned char s[38912];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 17408-byte shared buffer.
__global__ void kernel_98(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 32768-byte shared buffer.
__global__ void kernel_99(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// --- Auto-generated shared-memory stress kernels (kernel_100 .. kernel_128) ---
// Identical pattern throughout: thread i (= threadIdx.x) zeroes one byte of
// a statically sized __shared__ buffer, then over 45 passes performs
// `repeat` increments, `repeat` decrements, and `repeat` increments of it
// (net s[i] == (45 * repeat) mod 256). Only the buffer size differs per
// kernel, varying the static shared-memory footprint (presumably an
// occupancy sweep — TODO confirm against the harness). The inner loop
// counters shadow the outer `n` intentionally; the loops are the measured
// workload, so statement order must be preserved.
// NOTE(review): s[i] has no bounds check — assumes blockDim.x <= buffer
// size (the visible host code launches 32 threads); verify call sites.
__global__ void kernel_100(int repeat) {
// 33792-byte shared buffer.
__shared__ unsigned char s[33792];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 32768-byte shared buffer.
__global__ void kernel_101(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 10240-byte shared buffer.
__global__ void kernel_102(int repeat) {
__shared__ unsigned char s[10240];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 32768-byte shared buffer.
__global__ void kernel_103(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 9216-byte shared buffer.
__global__ void kernel_104(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 17408-byte shared buffer.
__global__ void kernel_105(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 32768-byte shared buffer.
__global__ void kernel_106(int repeat) {
__shared__ unsigned char s[32768];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 11264-byte shared buffer.
__global__ void kernel_107(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 28672-byte shared buffer.
__global__ void kernel_108(int repeat) {
__shared__ unsigned char s[28672];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 9216-byte shared buffer.
__global__ void kernel_109(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 15360-byte shared buffer.
__global__ void kernel_110(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 9216-byte shared buffer.
__global__ void kernel_111(int repeat) {
__shared__ unsigned char s[9216];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 34816-byte shared buffer.
__global__ void kernel_112(int repeat) {
__shared__ unsigned char s[34816];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 6144-byte shared buffer.
__global__ void kernel_113(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 1024-byte shared buffer.
__global__ void kernel_114(int repeat) {
__shared__ unsigned char s[1024];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 11264-byte shared buffer.
__global__ void kernel_115(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 17408-byte shared buffer.
__global__ void kernel_116(int repeat) {
__shared__ unsigned char s[17408];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 30720-byte shared buffer.
__global__ void kernel_117(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 26624-byte shared buffer.
__global__ void kernel_118(int repeat) {
__shared__ unsigned char s[26624];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 15360-byte shared buffer.
__global__ void kernel_119(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 29696-byte shared buffer.
__global__ void kernel_120(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 30720-byte shared buffer.
__global__ void kernel_121(int repeat) {
__shared__ unsigned char s[30720];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 15360-byte shared buffer.
__global__ void kernel_122(int repeat) {
__shared__ unsigned char s[15360];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 29696-byte shared buffer.
__global__ void kernel_123(int repeat) {
__shared__ unsigned char s[29696];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 4096-byte shared buffer.
__global__ void kernel_124(int repeat) {
__shared__ unsigned char s[4096];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 6144-byte shared buffer.
__global__ void kernel_125(int repeat) {
__shared__ unsigned char s[6144];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 13312-byte shared buffer.
__global__ void kernel_126(int repeat) {
__shared__ unsigned char s[13312];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 8192-byte shared buffer.
__global__ void kernel_127(int repeat) {
__shared__ unsigned char s[8192];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
// Same stress pattern; 11264-byte shared buffer.
__global__ void kernel_128(int repeat) {
__shared__ unsigned char s[11264];
int i = threadIdx.x;
s[i] = 0;
for (int n = 0; n < 45; n++) {
for (int n = 0; n < repeat; n++) s[i]++;
for (int n = 0; n < repeat; n++) s[i]--;
for (int n = 0; n < repeat; n++) s[i]++;
}
}
int main() {
cudaStream_t streams[128];
for (int i = 0; i < 128; i++) cudaStreamCreate(&streams[i]);
{
int repeat = 33792;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_93, grid, block, args, 0, streams[0]);
}
{
int repeat = 48128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_18, grid, block, args, 0, streams[1]);
}
{
int repeat = 34816;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_32, grid, block, args, 0, streams[2]);
}
{
int repeat = 43008;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_29, grid, block, args, 0, streams[3]);
}
{
int repeat = 36864;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_106, grid, block, args, 0, streams[4]);
}
{
int repeat = 54272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_12, grid, block, args, 0, streams[5]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_98, grid, block, args, 0, streams[6]);
}
{
int repeat = 35840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_13, grid, block, args, 0, streams[7]);
}
{
int repeat = 78848;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_72, grid, block, args, 0, streams[8]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_71, grid, block, args, 0, streams[9]);
}
{
int repeat = 39936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_57, grid, block, args, 0, streams[10]);
}
{
int repeat = 46080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_25, grid, block, args, 0, streams[11]);
}
{
int repeat = 64512;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_127, grid, block, args, 0, streams[12]);
}
{
int repeat = 49152;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_2, grid, block, args, 0, streams[13]);
}
{
int repeat = 72704;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_124, grid, block, args, 0, streams[14]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_112, grid, block, args, 0, streams[15]);
}
{
int repeat = 45056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_88, grid, block, args, 0, streams[16]);
}
{
int repeat = 87040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_22, grid, block, args, 0, streams[17]);
}
{
int repeat = 104448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_69, grid, block, args, 0, streams[18]);
}
{
int repeat = 61440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_15, grid, block, args, 0, streams[19]);
}
{
int repeat = 163840;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_114, grid, block, args, 0, streams[20]);
}
{
int repeat = 62464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_45, grid, block, args, 0, streams[21]);
}
{
int repeat = 55296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_38, grid, block, args, 0, streams[22]);
}
{
int repeat = 86016;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_10, grid, block, args, 0, streams[23]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_89, grid, block, args, 0, streams[24]);
}
{
int repeat = 93184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_80, grid, block, args, 0, streams[25]);
}
{
int repeat = 88064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_95, grid, block, args, 0, streams[26]);
}
{
int repeat = 47104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_24, grid, block, args, 0, streams[27]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_116, grid, block, args, 0, streams[28]);
}
{
int repeat = 132096;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_125, grid, block, args, 0, streams[29]);
}
{
int repeat = 51200;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_35, grid, block, args, 0, streams[30]);
}
{
int repeat = 102400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_66, grid, block, args, 0, streams[31]);
}
{
int repeat = 63488;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_4, grid, block, args, 0, streams[32]);
}
{
int repeat = 100352;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_7, grid, block, args, 0, streams[33]);
}
{
int repeat = 68608;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_99, grid, block, args, 0, streams[34]);
}
{
int repeat = 167936;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_52, grid, block, args, 0, streams[35]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_103, grid, block, args, 0, streams[36]);
}
{
int repeat = 77824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_41, grid, block, args, 0, streams[37]);
}
{
int repeat = 189440;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_31, grid, block, args, 0, streams[38]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_73, grid, block, args, 0, streams[39]);
}
{
int repeat = 75776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_117, grid, block, args, 0, streams[40]);
}
{
int repeat = 113664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_54, grid, block, args, 0, streams[41]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_82, grid, block, args, 0, streams[42]);
}
{
int repeat = 119808;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_19, grid, block, args, 0, streams[43]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_111, grid, block, args, 0, streams[44]);
}
{
int repeat = 81920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_120, grid, block, args, 0, streams[45]);
}
{
int repeat = 111616;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_49, grid, block, args, 0, streams[46]);
}
{
int repeat = 144384;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_44, grid, block, args, 0, streams[47]);
}
{
int repeat = 92160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_92, grid, block, args, 0, streams[48]);
}
{
int repeat = 124928;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_55, grid, block, args, 0, streams[49]);
}
{
int repeat = 110592;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_62, grid, block, args, 0, streams[50]);
}
{
int repeat = 112640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_126, grid, block, args, 0, streams[51]);
}
{
int repeat = 103424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_34, grid, block, args, 0, streams[52]);
}
{
int repeat = 205824;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_109, grid, block, args, 0, streams[53]);
}
{
int repeat = 146432;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_47, grid, block, args, 0, streams[54]);
}
{
int repeat = 229376;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_26, grid, block, args, 0, streams[55]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_110, grid, block, args, 0, streams[56]);
}
{
int repeat = 117760;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_81, grid, block, args, 0, streams[57]);
}
{
int repeat = 126976;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_42, grid, block, args, 0, streams[58]);
}
{
int repeat = 142336;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_21, grid, block, args, 0, streams[59]);
}
{
int repeat = 240640;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_113, grid, block, args, 0, streams[60]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_16, grid, block, args, 0, streams[61]);
}
{
int repeat = 84992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_75, grid, block, args, 0, streams[62]);
}
{
int repeat = 150528;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_115, grid, block, args, 0, streams[63]);
}
{
int repeat = 129024;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_51, grid, block, args, 0, streams[64]);
}
{
int repeat = 174080;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_33, grid, block, args, 0, streams[65]);
}
{
int repeat = 158720;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_60, grid, block, args, 0, streams[66]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_64, grid, block, args, 0, streams[67]);
}
{
int repeat = 156672;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_79, grid, block, args, 0, streams[68]);
}
{
int repeat = 154624;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_6, grid, block, args, 0, streams[69]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_101, grid, block, args, 0, streams[70]);
}
{
int repeat = 190464;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_107, grid, block, args, 0, streams[71]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_63, grid, block, args, 0, streams[72]);
}
{
int repeat = 149504;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_40, grid, block, args, 0, streams[73]);
}
{
int repeat = 176128;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_87, grid, block, args, 0, streams[74]);
}
{
int repeat = 233472;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_85, grid, block, args, 0, streams[75]);
}
{
int repeat = 183296;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_9, grid, block, args, 0, streams[76]);
}
{
int repeat = 159744;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_105, grid, block, args, 0, streams[77]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_65, grid, block, args, 0, streams[78]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_102, grid, block, args, 0, streams[79]);
}
{
int repeat = 187392;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_96, grid, block, args, 0, streams[80]);
}
{
int repeat = 232448;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_1, grid, block, args, 0, streams[81]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_83, grid, block, args, 0, streams[82]);
}
{
int repeat = 173056;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_23, grid, block, args, 0, streams[83]);
}
{
int repeat = 188416;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_68, grid, block, args, 0, streams[84]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_122, grid, block, args, 0, streams[85]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_84, grid, block, args, 0, streams[86]);
}
{
int repeat = 211968;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_118, grid, block, args, 0, streams[87]);
}
{
int repeat = 231424;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_128, grid, block, args, 0, streams[88]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_104, grid, block, args, 0, streams[89]);
}
{
int repeat = 244736;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_61, grid, block, args, 0, streams[90]);
}
{
int repeat = 181248;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_36, grid, block, args, 0, streams[91]);
}
{
int repeat = 209920;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_14, grid, block, args, 0, streams[92]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_50, grid, block, args, 0, streams[93]);
}
{
int repeat = 225280;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_59, grid, block, args, 0, streams[94]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_78, grid, block, args, 0, streams[95]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_70, grid, block, args, 0, streams[96]);
}
{
int repeat = 182272;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_11, grid, block, args, 0, streams[97]);
}
{
int repeat = 184320;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_100, grid, block, args, 0, streams[98]);
}
{
int repeat = 215040;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_94, grid, block, args, 0, streams[99]);
}
{
int repeat = 199680;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_76, grid, block, args, 0, streams[100]);
}
{
int repeat = 258048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_119, grid, block, args, 0, streams[101]);
}
{
int repeat = 246784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_43, grid, block, args, 0, streams[102]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_67, grid, block, args, 0, streams[103]);
}
{
int repeat = 242688;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_37, grid, block, args, 0, streams[104]);
}
{
int repeat = 250880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_48, grid, block, args, 0, streams[105]);
}
{
int repeat = 230400;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_56, grid, block, args, 0, streams[106]);
}
{
int repeat = 208896;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_46, grid, block, args, 0, streams[107]);
}
{
int repeat = 203776;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_121, grid, block, args, 0, streams[108]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_39, grid, block, args, 0, streams[109]);
}
{
int repeat = 212992;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_123, grid, block, args, 0, streams[110]);
}
{
int repeat = 122880;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_17, grid, block, args, 0, streams[111]);
}
{
int repeat = 195584;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_27, grid, block, args, 0, streams[112]);
}
{
int repeat = 201728;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_53, grid, block, args, 0, streams[113]);
}
{
int repeat = 220160;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_74, grid, block, args, 0, streams[114]);
}
{
int repeat = 216064;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_3, grid, block, args, 0, streams[115]);
}
{
int repeat = 236544;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_28, grid, block, args, 0, streams[116]);
}
{
int repeat = 218112;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_108, grid, block, args, 0, streams[117]);
}
{
int repeat = 259072;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_58, grid, block, args, 0, streams[118]);
}
{
int repeat = 261120;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_8, grid, block, args, 0, streams[119]);
}
{
int repeat = 95232;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_90, grid, block, args, 0, streams[120]);
}
{
int repeat = 118784;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_77, grid, block, args, 0, streams[121]);
}
{
int repeat = 130048;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_97, grid, block, args, 0, streams[122]);
}
{
int repeat = 141312;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_5, grid, block, args, 0, streams[123]);
}
{
int repeat = 175104;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_20, grid, block, args, 0, streams[124]);
}
{
int repeat = 194560;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_30, grid, block, args, 0, streams[125]);
}
{
int repeat = 221184;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_86, grid, block, args, 0, streams[126]);
}
{
int repeat = 241664;
dim3 grid = { 2, 1, 1 };
dim3 block = { 32, 1, 1 };
void* args[] = { (void**)&repeat };
cudaLaunchKernel(kernel_91, grid, block, args, 0, streams[127]);
}
cudaStreamSynchronize(streams[0]);
cudaStreamSynchronize(streams[1]);
cudaStreamSynchronize(streams[2]);
cudaStreamSynchronize(streams[3]);
cudaStreamSynchronize(streams[4]);
cudaStreamSynchronize(streams[5]);
cudaStreamSynchronize(streams[6]);
cudaStreamSynchronize(streams[7]);
cudaStreamSynchronize(streams[8]);
cudaStreamSynchronize(streams[9]);
cudaStreamSynchronize(streams[10]);
cudaStreamSynchronize(streams[11]);
cudaStreamSynchronize(streams[12]);
cudaStreamSynchronize(streams[13]);
cudaStreamSynchronize(streams[14]);
cudaStreamSynchronize(streams[15]);
cudaStreamSynchronize(streams[16]);
cudaStreamSynchronize(streams[17]);
cudaStreamSynchronize(streams[18]);
cudaStreamSynchronize(streams[19]);
cudaStreamSynchronize(streams[20]);
cudaStreamSynchronize(streams[21]);
cudaStreamSynchronize(streams[22]);
cudaStreamSynchronize(streams[23]);
cudaStreamSynchronize(streams[24]);
cudaStreamSynchronize(streams[25]);
cudaStreamSynchronize(streams[26]);
cudaStreamSynchronize(streams[27]);
cudaStreamSynchronize(streams[28]);
cudaStreamSynchronize(streams[29]);
cudaStreamSynchronize(streams[30]);
cudaStreamSynchronize(streams[31]);
cudaStreamSynchronize(streams[32]);
cudaStreamSynchronize(streams[33]);
cudaStreamSynchronize(streams[34]);
cudaStreamSynchronize(streams[35]);
cudaStreamSynchronize(streams[36]);
cudaStreamSynchronize(streams[37]);
cudaStreamSynchronize(streams[38]);
cudaStreamSynchronize(streams[39]);
cudaStreamSynchronize(streams[40]);
cudaStreamSynchronize(streams[41]);
cudaStreamSynchronize(streams[42]);
cudaStreamSynchronize(streams[43]);
cudaStreamSynchronize(streams[44]);
cudaStreamSynchronize(streams[45]);
cudaStreamSynchronize(streams[46]);
cudaStreamSynchronize(streams[47]);
cudaStreamSynchronize(streams[48]);
cudaStreamSynchronize(streams[49]);
cudaStreamSynchronize(streams[50]);
cudaStreamSynchronize(streams[51]);
cudaStreamSynchronize(streams[52]);
cudaStreamSynchronize(streams[53]);
cudaStreamSynchronize(streams[54]);
cudaStreamSynchronize(streams[55]);
cudaStreamSynchronize(streams[56]);
cudaStreamSynchronize(streams[57]);
cudaStreamSynchronize(streams[58]);
cudaStreamSynchronize(streams[59]);
cudaStreamSynchronize(streams[60]);
cudaStreamSynchronize(streams[61]);
cudaStreamSynchronize(streams[62]);
cudaStreamSynchronize(streams[63]);
cudaStreamSynchronize(streams[64]);
cudaStreamSynchronize(streams[65]);
cudaStreamSynchronize(streams[66]);
cudaStreamSynchronize(streams[67]);
cudaStreamSynchronize(streams[68]);
cudaStreamSynchronize(streams[69]);
cudaStreamSynchronize(streams[70]);
cudaStreamSynchronize(streams[71]);
cudaStreamSynchronize(streams[72]);
cudaStreamSynchronize(streams[73]);
cudaStreamSynchronize(streams[74]);
cudaStreamSynchronize(streams[75]);
cudaStreamSynchronize(streams[76]);
cudaStreamSynchronize(streams[77]);
cudaStreamSynchronize(streams[78]);
cudaStreamSynchronize(streams[79]);
cudaStreamSynchronize(streams[80]);
cudaStreamSynchronize(streams[81]);
cudaStreamSynchronize(streams[82]);
cudaStreamSynchronize(streams[83]);
cudaStreamSynchronize(streams[84]);
cudaStreamSynchronize(streams[85]);
cudaStreamSynchronize(streams[86]);
cudaStreamSynchronize(streams[87]);
cudaStreamSynchronize(streams[88]);
cudaStreamSynchronize(streams[89]);
cudaStreamSynchronize(streams[90]);
cudaStreamSynchronize(streams[91]);
cudaStreamSynchronize(streams[92]);
cudaStreamSynchronize(streams[93]);
cudaStreamSynchronize(streams[94]);
cudaStreamSynchronize(streams[95]);
cudaStreamSynchronize(streams[96]);
cudaStreamSynchronize(streams[97]);
cudaStreamSynchronize(streams[98]);
cudaStreamSynchronize(streams[99]);
cudaStreamSynchronize(streams[100]);
cudaStreamSynchronize(streams[101]);
cudaStreamSynchronize(streams[102]);
cudaStreamSynchronize(streams[103]);
cudaStreamSynchronize(streams[104]);
cudaStreamSynchronize(streams[105]);
cudaStreamSynchronize(streams[106]);
cudaStreamSynchronize(streams[107]);
cudaStreamSynchronize(streams[108]);
cudaStreamSynchronize(streams[109]);
cudaStreamSynchronize(streams[110]);
cudaStreamSynchronize(streams[111]);
cudaStreamSynchronize(streams[112]);
cudaStreamSynchronize(streams[113]);
cudaStreamSynchronize(streams[114]);
cudaStreamSynchronize(streams[115]);
cudaStreamSynchronize(streams[116]);
cudaStreamSynchronize(streams[117]);
cudaStreamSynchronize(streams[118]);
cudaStreamSynchronize(streams[119]);
cudaStreamSynchronize(streams[120]);
cudaStreamSynchronize(streams[121]);
cudaStreamSynchronize(streams[122]);
cudaStreamSynchronize(streams[123]);
cudaStreamSynchronize(streams[124]);
cudaStreamSynchronize(streams[125]);
cudaStreamSynchronize(streams[126]);
cudaStreamSynchronize(streams[127]);
cudaProfilerStop();
for (int i = 0; i < 128; i++) cudaStreamDestroy(streams[i]);
}
#include "CudaComputing.cuh"
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include "math.h"
// Device-global scene flag: whether the sphere is part of the scene.
__device__ bool HasTheBall ;
// Single-thread kernel that copies the host-supplied value into HasTheBall.
__global__ void setDev_ball(bool dev_ball){
HasTheBall = dev_ball;
}
// Host wrapper: toggles the sphere by launching setDev_ball with one thread.
void setTheBall(bool Ball){
setDev_ball << <1, 1 >> >(Ball);
}
// Device-global scene flag: whether the cube is part of the scene.
__device__ bool HasTheCube ;
// Single-thread kernel that copies the host-supplied value into HasTheCube.
__global__ void setDev_cube(bool dev_cube){
HasTheCube = dev_cube;
}
// Host wrapper: toggles the cube by launching setDev_cube with one thread.
void setTheCube(bool cube){
setDev_cube << <1, 1 >> >(cube);
}
// Device-global scene flag: whether the cylinder is part of the scene.
__device__ bool HasTheCy ;
// Single-thread kernel that copies the host-supplied value into HasTheCy.
__global__ void setDev_cy(bool dev_cy){
HasTheCy = dev_cy;
}
// Host wrapper: toggles the cylinder by launching setDev_cy with one thread.
void setTheCylinder(bool Cy){
setDev_cy << <1, 1 >> >(Cy);
}
// Device-global scene flag: whether the back-wall mirror is enabled.
__device__ bool HasTheMirror ;
// Single-thread kernel that copies the host-supplied value into HasTheMirror.
__global__ void setDev_mirror(bool dev_mirror){
HasTheMirror = dev_mirror;
}
// Host wrapper: toggles the mirror by launching setDev_mirror with one thread.
void setTheMirror(bool mi){
setDev_mirror << <1, 1 >> >(mi);
}
// Device-global scene flag: whether the mirror is curved (see HitCurveMirror).
__device__ bool HasTheCurve;
// Single-thread kernel that copies the host-supplied value into HasTheCurve.
__global__ void setDev_curve(bool dev_curv){
HasTheCurve = dev_curv;
}
// Host wrapper: toggles the curved mirror by launching setDev_curve with one thread.
void setTheCurve(bool cur){
setDev_curve << <1, 1 >> >(cur);
}
// Device-global scene flag: whether shadow rays are cast (see shadowRay).
__device__ bool HasTheShadow ;
// Single-thread kernel that copies the host-supplied value into HasTheShadow.
__global__ void setDev_shadow(bool dev_sha){
HasTheShadow = dev_sha;
}
// Host wrapper: toggles shadows by launching setDev_shadow with one thread.
void setTheShadow(bool sha){
setDev_shadow << <1, 1 >> >(sha);
}
// Device-global scene flag: whether rays reflect off the ball (see HitTheBall).
__device__ bool HasTheBallFlection;
// Single-thread kernel that copies the host-supplied value into HasTheBallFlection.
__global__ void setDev_BF(bool dev_sha){
HasTheBallFlection = dev_sha;
}
// Host wrapper: toggles ball reflection by launching setDev_BF with one thread.
void setTheBF(bool sha){
setDev_BF<< <1, 1 >> >(sha);
}
// Scene geometry constants (device globals), in the same world units as the
// 1200 x 600 x [-600, 0] room the hit functions test against.
__device__ float CyHeight = 250;  // cylinder height (base sits at y = 0)
__device__ float CubeX = 600;     // cube anchor x
__device__ float CubeY = 0;       // cube base y (cube extends upward from here)
__device__ float CubeZ = -400;    // cube anchor z
__device__ float CyX = 800;       // cylinder axis x
__device__ float CyY = 0;         // cylinder base y
__device__ float CyZ = -300;      // cylinder axis z
// Reports whether the quadratic a*x^2 + b*x + c = 0 has a real root, i.e.
// its discriminant b^2 - 4ac is not negative. (The misspelled name is kept:
// it is the interface existing callers use.)
__device__ bool chekcSolution(float a, float b, float c){
	return !((b*b - 4 * a*c) < 0);
}
// Returns the "+" root of a*x^2 + b*x + c = 0 via the quadratic formula.
// Precondition: chekcSolution(a, b, c) is true and a != 0 (no guard here,
// matching the original contract).
// FIX: use sqrtf instead of sqrt so the computation stays in single
// precision on the device instead of silently promoting to double.
__device__ float getSolution1(float a, float b, float c){
	float rst = -b + sqrtf(b*b - 4 * a*c);
	rst = rst / (2 * a);
	return rst;
}
// Returns the "-" root of a*x^2 + b*x + c = 0 via the quadratic formula.
// Precondition: chekcSolution(a, b, c) is true and a != 0 (no guard here,
// matching the original contract).
// FIX: use sqrtf instead of sqrt so the computation stays in single
// precision on the device instead of silently promoting to double.
__device__ float getSolution2(float a, float b, float c){
	float rst = -b - sqrtf(b*b - 4 * a*c);
	rst = rst / (2 * a);
	return rst;
}
// Euclidean inner product of two 3-component vectors.
__device__ float dot(float3 a, float3 b){
	return a.x*b.x + a.y*b.y + a.z*b.z;
}
// Returns n scaled to unit length.
// FIX: use sqrtf instead of sqrt so the length computation stays in single
// precision on the device instead of silently promoting to double.
// NOTE(review): a zero-length input still divides by zero (inf/NaN),
// matching the original behavior; callers must pass non-zero vectors.
__device__ float3 normalize(float3 n){
	float length = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z);
	n.x = n.x / length;
	n.y = n.y / length;
	n.z = n.z / length;
	return n;
}
// Returns the larger of a and b (b wins ties).
__device__ float bigger(float a, float b){
	return (a > b) ? a : b;
}
// Axis-aligned box containment test. The box's base is at center.y, its top
// at center.y + e, and it is centered on (center.x, center.z) with half-width
// e/2 in x and z. Returns true when sample point s lies inside (inclusive).
__device__ bool IsHitTheCube(float3 s, float3 center, float e){
	float loX = center.x - e / 2;
	float hiX = center.x + e / 2;
	float loY = center.y;
	float hiY = center.y + e;
	float loZ = center.z - e / 2;
	float hiZ = center.z + e / 2;
	return s.x >= loX && s.x <= hiX
		&& s.y >= loY && s.y <= hiY
		&& s.z >= loZ && s.z <= hiZ;
}
// Classifies which face of the axis-aligned cube a marching ray entered
// through. `t` is the current sample point (already inside the cube, per
// IsHitTheCube at the call sites), `d` is the unit march direction, and
// `t - d*5` is the previous sample one step back. Each branch detects "was
// outside this face plane a step ago, inside now" and returns the midpoint
// of the last step in .xyz plus a face code in .w (1, 2, 3, 4, 5, or 7);
// (0,0,0,0) if no face test matches.
// (The original comment on this line was mojibake and has been replaced.)
__device__ float4 HitTheCube(float3 t,float3 d,float3 center,float e){
float up=center.y+e;
float down=center.y;
float left=center.x-e/2;
float right=center.x+e/2;
float front=center.z+e/2;
float back=center.z-e/2;
// Entered through the +x face (code 1.0).
if (t.x - d.x * 5 > right&&t.x <= right){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 1.0));
}
// Entered through the -x face (code 2.0).
if (t.x - d.x * 5 < left&&t.x >= left){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 2.0));
}
// Entered through the top face (code 3.0).
if (t.y - d.y * 5 > up&&t.y <= up){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 3.0));
}
// Entered through the bottom face (code 4.0).
if (t.y - d.y * 5 < down&&t.y >= down){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 4.0));
}
// Entered through the +z face (code 5.0).
if (t.z - d.z * 5 > front&&t.z <= front){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 5.0));
}
// Entered through the -z face (code 7.0).
if (t.z - d.z * 5 < back&&t.z >= back){
return(make_float4(t.x - d.x * 2.5, t.y - d.y * 2.5, t.z - d.z * 2.5, 7.0));
}
// No face crossing detected (e.g. the ray started inside).
return make_float4(0.0, 0.0, 0.0, 0.0);
}
// Containment test for an upright cylinder: axis parallel to y through
// (c.x, c.z), radius r, base at y = 0, top at y = h. Returns true when
// sample point s is inside (inclusive).
__device__ bool IsHitTheCylinder(float3 s,float3 c,float r,float h){
	float dx = s.x - c.x;
	float dz = s.z - c.z;
	bool withinRadius = dx*dx + dz*dz <= r*r;
	bool withinHeight = s.y >= 0 && s.y <= h;
	return withinRadius && withinHeight;
}
// Classifies how a marching ray entered the upright cylinder (axis through
// (c.x, c.z), radius r, top at height h). `t` is the current sample point
// (already inside, per IsHitTheCylinder at the call sites) and `t - d*5` is
// the previous sample. Returns .xyz = t and a surface code in .w:
// 3.0 = top cap, 9.0 = curved side wall.
// (The original comment on this line was mojibake and has been replaced.)
__device__ float4 HitTheCylinder(float3 t,float3 d,float3 c,float r,float h){
	// Crossed the top cap between the previous and current sample.
	if(t.y <= h&&t.y - d.y * 5>h){
		return make_float4(t.x, t.y, t.z, 3.0);
	}
	// Crossed the side wall: inside the radius now, outside a step ago.
	if ((t.x - c.x)*(t.x - c.x) + (t.z - c.z)*(t.z - c.z) <= r*r &&
		(t.x - d.x * 5 - c.x)*(t.x - d.x * 5 - c.x) + (t.z - d.z * 5 - c.z)*(t.z - d.z * 5 - c.z) > r*r){
		return make_float4(t.x, t.y, t.z, 9.0);
	}
	// BUG FIX: the original fell off the end of this non-void function with
	// no return statement (undefined behavior). Default to the side-wall
	// classification for the known-inside point so callers always receive a
	// well-defined value.
	return make_float4(t.x, t.y, t.z, 9.0);
}
// Traces a secondary ray leaving the sphere surface at s in direction dir,
// marching 5 units per step for up to 100 steps, and returns the first
// surface hit as .xyz = hit point, .w = surface id (wall ids 1-5/7, or the
// cube/cylinder codes from HitTheCube/HitTheCylinder). If nothing is hit
// within 100 steps, returns (0,0,0) with front-wall id 7.0.
// (Function name spelling kept: it is the interface existing callers use.)
// FIX: removed the unused locals k, x, y, z and R from the original.
__device__ float4 rayFromShpere(float3 s, float3 dir){
	float4 rst;
	rst.x = 0.0;
	rst.y = 0.0;
	rst.z = 0.0;
	rst.w = 7.0;
	float3 d = normalize(dir);
	float3 t = s;
	for (int i = 0; i < 100; i++){
		t.x += d.x * 5;
		t.y += d.y * 5;
		t.z += d.z * 5;
		// Optional occluders are tested before the room walls.
		if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX,CubeY,CubeZ), 200)){
			return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
		}
		if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
			return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
		}
		// Each wall test below detects a plane crossing between the previous
		// sample (t - 5d) and the current one, and reports the half-step-back
		// point with that wall's id.
		// Front wall z = 0 (id 7.0).
		if (t.z >= 0 && t.z - 5 * d.z < 0){
			rst.x = t.x - d.x*2.5;
			rst.y = t.y - d.y*2.5;
			rst.z = t.z - d.z*2.5;
			rst.w = 7.0;
			return rst;
		}
		// Left wall x = 0 (id 1.0).
		if (t.x <= 0 && t.x - 5 * d.x > 0){
			rst.x = t.x - d.x*2.5;
			rst.y = t.y - d.y*2.5;
			rst.z = t.z - d.z*2.5;
			rst.w = 1.0;
			return rst;
		}
		// Right wall x = 1200 (id 2.0).
		if (t.x >= 1200 && t.x - 5 * d.x < 1200){
			rst.x = t.x - d.x*2.5;
			rst.y = t.y - d.y*2.5;
			rst.z = t.z - d.z*2.5;
			rst.w = 2.0;
			return rst;
		}
		// Floor y = 0 (id 3.0).
		if (t.y <= 0 && t.y - 5 * d.y > 0){
			rst.x = t.x - d.x*2.5;
			rst.y = t.y - d.y*2.5;
			rst.z = t.z - d.z*2.5;
			rst.w = 3.0;
			return rst;
		}
		// Ceiling y = 600 (id 4.0).
		if (t.y >= 600 && t.y - 5 * d.y < 600){
			rst.x = t.x - d.x*2.5;
			rst.y = t.y - d.y*2.5;
			rst.z = t.z - d.z*2.5;
			rst.w = 4.0;
			return rst;
		}
	}
	return rst;
}
// Reports whether the ray from e through p intersects the sphere centered at
// cen with radius R, by testing the discriminant of the quadratic
// |e + s(p - e) - cen|^2 = R^2 in the ray parameter s.
__device__ bool IsHitTheBall(float3 e, float3 p, float3 cen, float R){
	float dx = p.x - e.x, dy = p.y - e.y, dz = p.z - e.z;
	float ox = e.x - cen.x, oy = e.y - cen.y, oz = e.z - cen.z;
	float a = dx*dx + dy*dy + dz*dz;
	float b = 2 * (dx*ox + dy*oy + dz*oz);
	float c = ox*ox + oy*oy + oz*oz - R*R;
	return chekcSolution(a, b, c);
}
// Intersects the ray from e through p with the sphere (center cen, radius R)
// and returns the near hit point with surface id 6.0 in .w. If ball
// reflection is enabled, instead reflects the ray about the sphere normal
// and continues tracing via rayFromShpere, returning that hit.
// Precondition: IsHitTheBall(e, p, cen, R) is true (callers check first).
__device__ float4 HitTheBall(float3 e, float3 p,float3 cen,float R){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float k;
// Quadratic coefficients of |e + k(p - e) - cen|^2 = R^2 in k.
float a = (p.x - e.x)*(p.x - e.x) + (p.y - e.y)*(p.y - e.y) + (p.z - e.z)*(p.z - e.z);
float b = 2 * ((p.x - e.x)*(e.x - cen.x) + (p.y - e.y)*(e.y - cen.y) + (p.z - e.z)*(e.z - cen.z));
float c = (e.x - cen.x)*(e.x - cen.x) + (e.y - cen.y)*(e.y - cen.y) + (e.z - cen.z)*(e.z - cen.z) - R*R;
//hit the ball
k = getSolution1(a, b, c);
rst.x = (p.x - e.x)*k + e.x;
rst.y = (p.y - e.y)*k + e.y;
rst.z = (p.z - e.z)*k + e.z;
rst.w = 6.0;
// Incoming direction (hit point toward p) and outward sphere normal.
float3 L1 = make_float3((p.x - rst.x), (p.y - rst.y), (p.z - rst.z));
L1 = normalize(L1);
float3 N = make_float3((rst.x - cen.x), (rst.y - cen.y), (rst.z - cen.z));
N = normalize(N);
// Mirror reflection: L2 = L1 - 2(L1 . N)N.
float3 L2 = make_float3(-2 * dot(L1, N)*N.x + L1.x, -2 * dot(L1, N)*N.y + L1.y, -2 * dot(L1, N)*N.z + L1.z);
// When reflection is enabled, trace the bounced ray instead of shading the
// sphere point directly. (Original mojibake comment replaced.)
if (HasTheBallFlection)return rayFromShpere(make_float3(rst.x, rst.y, rst.z), L2);
return rst;
}
// Traces a mirror-reflected ray from e through p. First checks the sphere
// analytically, then marches up to 200 steps of 5 units looking for the cube
// or cylinder, and finally falls back to analytic plane intersections with
// the room walls. Returns .xyz = hit point and .w = surface id
// (1 left, 2 right, 3 floor, 4 ceiling, 6 ball via HitTheBall, 7 front wall).
__device__ float4 HitTheMirror(float3 e, float3 p, float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 7.0;
float k;
float x;
float y;
float z;
// Sphere radius matches the R = 140 used by getHitPoint/computeSingleRay.
float R = 140;
if (HasTheBall&&IsHitTheBall(e, p, cen, R) == true){
return HitTheBall(e, p, cen, R);
}
// March from p along the ray direction to find the cube/cylinder occluders.
float3 d = normalize(make_float3(p.x - e.x, p.y - e.y, p.z - e.z));
float3 t = p;
for (int i = 0; i < 200; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
// No occluder: intersect the ray (parameterized by k from e toward p)
// with each wall plane in turn and keep the first in-bounds hit.
// Front wall z = 0 (id 7.0).
z = 0;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 7.0;
return rst;
}
// Left wall x = 0 (id 1.0).
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
// Right wall x = 1200 (id 2.0).
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
// Floor y = 0 (id 3.0).
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
// Ceiling y = 600 (id 4.0).
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
return rst;
}
return rst;
}
// Curved-mirror path: marches the ray from s along d in 5-unit steps until it
// crosses the cylinder of radius 625 centered at (x, z) = (600, -225) (the
// curved mirror surface), reflects the direction about the surface normal,
// and hands the reflected ray to HitTheMirror. Rays that climb above
// y = 500 return a ceiling hit (id 4.0); rays that never cross within 200
// steps return (0,0,0) with front-wall id 7.0.
// FIX: removed the unreachable `break` that followed the `return` inside the
// loop (dead code).
__device__ float4 HitCurveMirror(float3 s, float3 d,float3 ball){
	float4 rst;
	rst.x = 0.0;
	rst.y = 0.0;
	rst.z = 0.0;
	rst.w = 7.0;
	float3 L1;
	float3 N;
	float3 L2;
	float3 t = s;
	d = normalize(d);
	// March until the sample crosses the mirror cylinder from inside to
	// outside (current sample beyond radius 625, previous sample within).
	for (int i = 0; i < 200; i++){
		t.x += d.x * 5;
		t.y += d.y * 5;
		t.z += d.z * 5;
		if (t.y>500)return(make_float4(t.x, t.y, t.z, 4.0));
		if ((t.x - 600)*(t.x - 600) + (t.z + 225)*(t.z + 225) >= 625 * 625 && (t.x - d.x * 5 - 600)*(t.x - d.x * 5 - 600) + (t.z - d.z * 5 + 225)*(t.z - d.z * 5 + 225) < 625 * 625){
			// Reflect the (reversed) incoming direction about the inward
			// cylinder normal: L2 = 2(L1 . N)N - L1.
			L1 = make_float3(-d.x, -d.y, -d.z);
			L1 = normalize(L1);
			N = make_float3(600 - t.x, 0,-225 - t.z);
			N = normalize(N);
			L2 = make_float3(2 * dot(L1, N)*N.x - L1.x, 2 * dot(L1, N)*N.y - L1.y, 2 * dot(L1, N)*N.z - L1.z);
			return HitTheMirror(t, make_float3(t.x + L2.x, t.y + L2.y, t.z + L2.z), ball);
		}
	}
	return rst;
}
// Intersects the primary ray from e through p with the room walls. The back
// wall (z = -600) is tested first; its inner region (x in [100, 1100],
// y in [100, 550]) is the mirror, which redirects the ray via HitTheMirror or
// HitCurveMirror when enabled. Returns .xyz = hit point, .w = surface id
// (1 left, 2 right, 3 floor, 4 ceiling, 5 back wall, 8 ceiling light disc,
// or whatever the mirror path returns). Returns w = 0.0 if nothing matches.
__device__ float4 HitTheWall(float3 e,float3 p,float3 cen){
float4 rst;
rst.x = 0.0;
rst.y = 0.0;
rst.z = 0.0;
rst.w = 0.0;
float x;
float y;
float z;
float k;
// Back wall z = -600; k is the ray parameter from e toward p.
z = -600;
k = (z - e.z) / (p.z - e.z);
x = (p.x - e.x)*k + e.x;
y = (p.y - e.y)*k + e.y;
if (x >= 0 && x <= 1200 && y >= 0 && y <= 600){
// Inner rectangle = mirror area.
if (x >= 100 && x <= 1100 && y >= 100 && y <= 550){
if (HasTheMirror){
if (HasTheCurve){
return HitCurveMirror(make_float3(p.x, p.y, p.z), make_float3(p.x - e.x, p.y - e.y, p.z - e.z), make_float3(cen.x, cen.y, cen.z));
}
// Flat mirror: reflect the eye across the z = -600 plane and re-trace.
return HitTheMirror(make_float3(e.x,e.y,-1200-e.z), make_float3(x,y,z), cen);
}
if (!HasTheMirror){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
else{
// Back-wall frame outside the mirror area (id 5.0).
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 5.0;
return rst;
}
}
// Left wall x = 0 (id 1.0).
x = 0;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 1.0;
return rst;
}
// Right wall x = 1200 (id 2.0).
x = 1200;
k = (x - e.x) / (p.x - e.x);
y = (p.y - e.y)*k + e.y;
z = (p.z - e.z)*k + e.z;
if (y >= 0 && y <= 600 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 2.0;
return rst;
}
// Floor y = 0 (id 3.0).
y = 0;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 3.0;
return rst;
}
// Ceiling y = 600 (id 4.0); the disc of radius 100 around (600, -300) is
// the light (id 8.0).
y = 600;
k = (y - e.y) / (p.y - e.y);
x = (p.x - e.x)*k + e.x;
z = (p.z - e.z)*k + e.z;
if (x >= 0 && x <= 1200 && z >= -600 && z <= 0){
rst.x = x;
rst.y = y;
rst.z = z;
rst.w = 4.0;
if ((x - 600)*(x - 600) + (z + 300)*(z + 300)<100 * 100)rst.w = 8.0;
return rst;
}
return rst;
}
// Primary-ray dispatcher: for the ray from eye e through image-plane point p,
// tests the sphere analytically first, then marches up to 100 steps of 5
// units for the cube and cylinder, and finally falls through to the room
// walls. Returns .xyz = hit point and .w = surface id.
__device__ float4 getHitPoint(float3 e, float3 p, float3 cen){
//hit the ball
float R = 140;
if (IsHitTheBall(e, p, cen, R) == true && HasTheBall==true){
return HitTheBall(e, p, cen, R);
}
//hit the cube and the cylinder
float3 d = normalize(make_float3(p.x-e.x,p.y-e.y,p.z-e.z));
float3 t = p;
for (int i = 0; i < 100; i++){
t = make_float3(t.x + d.x * 5, t.y + d.y * 5, t.z + d.z * 5);
if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200)){
return HitTheCube(t, d, make_float3(CubeX, CubeY, CubeZ), 200);
}
if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
return HitTheCylinder(t, d, make_float3(CyX, CyY, CyZ), 100, CyHeight);
}
}
//hit the wall
return HitTheWall(e, p, cen);
}
// Maps a hit record (point in .xyz, surface id in .w) to its unit surface
// normal. (cx, cy, cz) is the sphere center, used for id 6.0. Ids: 1/2 left
// and right walls, 3/4 floor and ceiling, 5 back wall, 7 front wall,
// 6 sphere, 9 cylinder side (axis at x = 800, z = -300).
// BUG FIX: the original left N uninitialized when p.w was 0.0 or 8.0 (the
// ceiling light disc) and then normalized garbage memory. N now defaults to
// the ceiling normal (0, -1, 0) — the 8.0 disc lies on the ceiling per
// HitTheWall — so every path yields a defined vector.
__device__ float3 getNormal(float4 p,float cx,float cy,float cz){
	float3 N = make_float3(0, -1, 0);
	if (p.w != 0.0){
		if (p.w == 6.0){
			N = make_float3(p.x - cx, p.y - cy, p.z - cz);
		}
		if (p.w == 5.0){
			N = make_float3(0, 0, 1);
		}
		if (p.w == 1.0){
			N = make_float3(1, 0, 0);
		}
		if (p.w == 2.0){
			N = make_float3(-1, 0, 0);
		}
		if (p.w == 3.0){
			N = make_float3(0, 1, 0);
		}
		if (p.w == 4.0){
			N = make_float3(0, -1, 0);
		}
		if (p.w == 7.0){
			N = make_float3(0, 0, -1);
		}
		if (p.w == 9.0){
			N = make_float3(p.x-800,0,p.z+300);
		}
	}
	N = normalize(N);
	return N;
}
// Phong-style shading: given the hit record p (point + surface id in .w),
// the surface normal n, and the eye position (ex, ey, ez), returns an RGBA
// color. Diffuse intensity is attenuated by squared eye distance (scaled by
// 1/1,200,000, clamped to at least 1); the light sits at (600, 600, -300).
__device__ float4 getColor(float4 p,float3 n,float ex,float ey,float ez){
float dist = (p.x - ex)*(p.x - ex) + (p.y - ey)*(p.y - ey) + (p.z - ez)*(p.z - ez);
dist /= 1200000;
if (dist < 1)dist = 1;
// Per-surface material coefficients; the defaults below are overridden by
// the id-specific blocks. (Original mojibake comment replaced.)
float4 kd = make_float4(0.5, 0.5, 0.5, 1.0);
float4 ks = make_float4(0.0, 0.0, 0.1, 1.0);
float4 ka = make_float4(0.1, 0.1, 0.1, 1.0);
// Light source terms (diffuse / specular / ambient).
float4 dyDiffuse = make_float4(1.0, 1.0, 1.0, 1.0);
float4 dySpecular = make_float4(0.5, 0.5, 0.5, 1.0);
float4 dyAmbient = make_float4(0.2, 0.2, 0.2, 1.0);
if (p.w == 6.0){//the ball
kd = make_float4(0.5, 0.5, 0.9, 1.0);
ks = make_float4(0.0, 0.0, 0.0, 1.0);
ka = make_float4(0.5, 0.5, 0.5, 1.0);
}
if (p.w == 5.0){//back wall
kd = make_float4(0.0, 0.6, 0.0, 1.0);
ks = make_float4(0.9, 0.0, 0.0, 1.0);
ka = make_float4(0.05, 0.0, 0.0, 1.0);
}
if (p.w == 1.0){//left wall
kd = make_float4(0.5, 0.0, 0.0, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 2.0){//right wall
kd = make_float4(0.0, 0.0, 0.5, 1.0);
ks = make_float4(0.1, 0.0, 0.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 3.0){//floor
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 4.0){//ceil
kd = make_float4(0.0, 0.5, 0.5, 1.0);
ks = make_float4(1.0, 1.0, 1.0, 1.0);
ka = make_float4(0.9, 0.9, 0.1, 1.0);
}
if (p.w == 7.0){//front wall
kd = make_float4(0.5, 0.0, 0.7, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
if (p.w == 9.0){
kd = make_float4(0.0, 1.0, 1.0, 1.0);
ks = make_float4(0.4, 0.4, 0.4, 1.0);
ka = make_float4(0.4, 0.4, 0.4, 1.0);
}
// View, light, and halfway vectors for the Blinn-Phong terms.
float3 V = normalize(make_float3(ex - p.x, ey - p.y, ez - p.z));
float3 L = normalize(make_float3(600 - p.x, 600 - p.y, -300 - p.z));
float3 H = normalize(make_float3(V.x + L.x, V.y + L.y, V.z + L.z));
float4 ambient1 = make_float4(ka.x*dyAmbient.x , ka.y*dyAmbient.y , ka.z*dyAmbient.z , ka.w*dyAmbient.w );
float max1 = bigger(dot(n, L), 0.0f);
float4 diffuse1 = make_float4(kd.x*max1*dyDiffuse.x / dist, kd.y*max1*dyDiffuse.y / dist, kd.z*max1*dyDiffuse.z / dist, kd.w*max1*dyDiffuse.w / dist);
// Specular: (n . H)^10, zeroed when the light is behind the surface.
float max2 = powf(bigger(dot(n, H), 0.0f),10.0f);
float4 specular1 = make_float4(ks.x*max2*dySpecular.x,ks.y*max2*dySpecular.y, ks.z*max2*dySpecular.z, ks.w*max2*dySpecular.w);
if(dot(n,L)<0) specular1 =make_float4(0.0,0.0,0.0,0.0);
float4 color1 = make_float4(ambient1.x + diffuse1.x+specular1.x,
ambient1.y + diffuse1.y + specular1.y,
ambient1.z + diffuse1.z + specular1.z,
ambient1.w + diffuse1.w + specular1.w);
// The ceiling light disc (id 8.0) is emissive: constant yellow.
if (p.w == 8.0){
color1 = make_float4(1.0, 1.0, 0.0, 1.0);
}
return color1;
}
// Marches from surface point s toward the light position e in 100 equal
// steps and reports whether any enabled occluder (sphere of radius R around
// `center`, cube, or cylinder) blocks the segment. Returns true when the
// point is in shadow.
// FIX: removed the three unreachable `break` statements that followed
// `return true` inside the loop (dead code).
__device__ bool shadowRay(float3 s, float3 e, float3 center, float R){
	int divide = 100;
	float divX = (e.x - s.x) / divide;
	float divY = (e.y - s.y) / divide;
	float divZ = (e.z - s.z) / divide;
	float3 t = s;
	for (int i = 0; i < divide; i++){
		t.x += divX;
		t.y += divY;
		t.z += divZ;
		// Sphere: crossed the surface between the previous sample (outside)
		// and the current one (inside).
		if (HasTheBall&&((t.x - divX - center.x)*(t.x - divX - center.x) + (t.y - divY - center.y)*(t.y - divY - center.y) + (t.z - divZ - center.z)*(t.z - divZ - center.z) > R*R) && ((t.x - center.x)*(t.x - center.x) + (t.y - center.y)*(t.y - center.y) + (t.z - center.z)*(t.z - center.z) <= R*R)){
			return true;
		}
		if (HasTheCube&&IsHitTheCube(t, make_float3(CubeX, CubeY, CubeZ), 200) == true){
			return true;
		}
		if (HasTheCy&&IsHitTheCylinder(t, make_float3(CyX, CyY, CyZ), 100, CyHeight)){
			return true;
		}
	}
	return false;
}
//global
// Per-pixel primary-ray kernel: thread (i, j) shoots one ray from the eye
// through pixel (i, j), shades the hit point (Phong-style via getColor),
// darkens it if the shadow test hits an occluder, and writes 8-bit RGB into
// the flat texture buffer `tex`.
// NOTE(review): the row stride is hard-coded to 1200 pixels below, so the
// launching code must use width == 1200 — confirm before reusing elsewhere.
// No bounds check on (i, j): the grid must tile the image exactly.
__global__ void computeSingleRay(char* tex){
//vec4 temp = getHitPoint(Ex, Ey, Ez, vPosition.x, vPosition.y, vPosition.z);
// Pixel coordinates from the 2-D launch configuration.
//int j = threadIdx.x;
//int i = blockIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
// Eye position (Ex, Ey, Ez) and sphere center (Cx, Cy, Cz) — hard-coded scene.
float Ex = 600;
float Ey = 300;
float Ez = 800;
float Cx = 200;
float Cy = 300;
float Cz = -350;
float3 E = make_float3(Ex, Ey, Ez);
float3 P = make_float3(i, j, 0);
float3 C = make_float3(Cx,Cy, Cz);
//float Cx = 200;
// Nearest hit along the eye ray; .w encodes which surface was hit.
float4 position = getHitPoint(E,P,C);
// Surface normal at the hit point.
float3 normal = getNormal(position,Cx,Cy,Cz);
// Shaded color at the hit point.
float4 color = getColor(position, normal,Ex,Ey,Ez);
float3 p = make_float3(position.x, position.y, position.z);
// Light position used by the shadow ray (matches the light in getColor).
float3 e = make_float3(600, 600, -300);
float3 c = make_float3(Cx, Cy, Cz);
// Attenuate to 20% when in shadow; surface id 6.0 is exempt.
if (HasTheShadow&&shadowRay(p, e, c, 140) && position.w != 6.0)color = make_float4(color.x*0.2, color.y*0.2, color.z*0.2, 1);
// Pack into the byte buffer; stride assumes a 1200-pixel-wide frame.
tex[j * 1200 * 3 + i * 3] = color.x*255;
tex[j * 1200 * 3 + i * 3 + 1] = color.y*255;
tex[j * 1200 * 3 + i * 3 + 2] = color.z*255;
}
//1200*600 size, Ex Ey Ez
// Renders a width x height frame into tex (3 bytes per pixel, row-major).
// Preconditions: width and height must be multiples of 8 (the block edge) —
// the grid uses truncating division and the kernel has no bounds guard — and
// the kernel's hard-coded row stride means width should be 1200.
void computeRays(int width,int height,char *tex){
    char * dev_Tex;
    cudaError_t err = cudaMalloc((void**)&dev_Tex, 3 * width * height * sizeof(char));
    if (err != cudaSuccess) {
        // Allocation failed: nothing to render into, bail out.
        fprintf(stderr, "computeRays: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return;
    }
    dim3 block(8, 8, 1);
    dim3 grid(width / block.x, height / block.y, 1);
    computeSingleRay<<<grid, block>>>(dev_Tex);
    // Catch launch-configuration errors immediately.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "computeRays: kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    // cudaMemcpy is blocking, so it also synchronizes with the kernel.
    err = cudaMemcpy(tex, dev_Tex, 3 * width * height * sizeof(char), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "computeRays: copy-back failed: %s\n", cudaGetErrorString(err));
    }
    cudaFree(dev_Tex);
}
|
19,139 | #include "includes.h"
// First solution with global memory
// Shared memory residual calculation
// Reduction code from CUDA Slides - Mark Harris
// Block-wise sum reduction: result[blockIdx.x] = sum of the blockDim.x
// elements of res owned by this block.
// Preconditions:
//  - launch with dynamic shared memory of blockDim.x * sizeof(float),
//  - blockDim.x must be a power of two (the halving loop drops odd tails),
//  - res must hold gridDim.x * blockDim.x elements (no bounds check on index).
__global__ void gpu_HeatReduction (float *res, float *result) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int index= blockIdx.x*blockDim.x+ threadIdx.x;
// Stage one element per thread into shared memory.
sdata[tid] = res[index];
__syncthreads();
// Reduce the shared table to compute the residual
// Tree reduction: halve the number of active threads each pass.
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// Thread 0 publishes this block's partial sum.
if (tid == 0)
{
int blockIndex = blockIdx.x;
result[blockIndex] = sdata[tid];
}
}
// Finds the maximum value in a w_accum x h_accum accumulator (Hough-style)
// and records the coordinates of a cell holding it in dev_points[0..1].
// max[0] must be initialized (e.g. to 0 or INT_MIN) by the caller.
__global__ void findMaxInAccum(unsigned int* accum, int w_accum, int h_accum, int* dev_points, int* max)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= w_accum || y >= h_accum)
        return;
    int tid = y * w_accum + x;
    int val = (int)accum[tid];
    // atomicMax returns the value held *before* the update, so val > prev
    // means this thread's cell was the running maximum at that instant.
    // The original compared against a plain (non-atomic) re-read of max[0],
    // which raced with concurrent updates.  This version is still best-effort:
    // two threads can both see val > prev, and the recorded point may belong
    // to a cell that ties the maximum rather than the last one to update it.
    int prev = atomicMax(&max[0], val);
    if (val > prev) {
        atomicExch(&dev_points[0], x);
        atomicExch(&dev_points[1], y);
    }
    return;
}
19,141 | #include "includes.h"
// Replicates per-block values across each row: within the row of `numdims`
// entries owned by this thread block, every output element takes the source
// value found at the start of its `blocksize`-wide block.
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
    const int rowOffset = blockIdx.x * numdims;
    // Grid of blockDim.x threads strides across the row cooperatively.
    for (unsigned int d = threadIdx.x; d < numdims; d += blockDim.x) {
        const int blockStart = (d / blocksize) * blocksize;
        target[rowOffset + d] = source[rowOffset + blockStart];
    }
}
19,142 | #include <cstdio>
#include <cstdlib>
#include <algorithm>
#include <iterator>
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
// #include <cublas_v2.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#define ll long long
__device__ __constant__ unsigned ll m1[16*16] = {
3898138, 4272236, 3818297, 3809756, 4037852, 4676915, 3500886, 3619362, 4730634, 2829970, 3517506, 4087421, 4739601, 3912997, 3681388, 6359441, 3255282, 6285463, 5859578, 6306639, 5707605, 6090034, 3791157, 3023551, 6494436, 5639060, 2442072, 4931038, 3374951, 2973521, 3534381, 2006977, 4512006, 2730539, 4739724, 3385241, 4656916, 3938940, 3796672, 4762700, 6592658, 4096400, 3402646, 4623696, 6895815, 5140537, 3059772, 6976185, 4090503, 3847150, 5493941, 4084221, 5289630, 3366036, 3877302, 6044678, 5833958, 3454838, 5369570, 4076362, 4112938, 3652335, 5586920, 4455035, 3530871, 5166626, 2851545, 6459783, 3200179, 3905475, 4882530, 6386745, 4939154, 6054883, 6988990, 2800608, 4300093, 3029774, 6468896, 5294418, 4392603, 4249913, 5528383, 3092627, 5421793, 3344070, 6070234, 3644930, 4301352, 4607155, 5854072, 5832719, 5571846, 5018476, 1457339, 3594239, 3309544, 2658855, 3726900, 5894573, 5658810, 3933981, 6213873, 4204623, 4381714, 6075984, 5200611, 3378648, 3182854, 5359960, 5160722, 4496497, 6592430, 5971095, 4102753, 3446501, 2707085, 4192901, 3942405, 4480361, 5734622, 6092668, 5815420, 2193260, 3654966, 4195351, 4400242, 4859107, 4152135, 1848942, 5410256, 5432025, 4207561, 3319602, 4141769, 4403163, 2130807, 5398166, 4417568, 4774938, 1706853, 4677230, 6013745, 3612519, 4617344, 3915945, 6005133, 4442264, 5757908, 5010119, 3258015, 3542214, 3510823, 3080679, 4174442, 5279308, 3338275, 1425392, 4225674, 4111864, 5243952, 4014867, 3083188, 3784711, 3580232, 4758474, 3637293, 2206083, 2646303, 2474987, 6277880, 1158152, 3658919, 5452372, 3882484, 4304057, 3165794, 2191558, 5989865, 5078279, 6020807, 4652723, 3458259, 5856963, 4341712, 5928672, 6142960, 5079986, 4026505, 7370147, 5035209, 4660278, 3923764, 2550655, 0, 4165580, 4629051, 3069243, 3734965, 2812772, 2957817, 5182954, 4006232, 4623017, 6091379, 4084730, 3843241, 4139640, 5351058, 5051956, 6396350, 3120492, 4636888, 3984535, 6898848, 3942165, 4713424, 3750126, 6147513, 5964568, 5260514, 5227714, 
4282723, 4550280, 6269824, 5316692, 6081364, 5527953, 4178586, 3084830, 4026779, 5714687, 5225745, 5787677, 4253001, 4572657, 3353132, 5175311, 3802632, 6154279, 4099225, 4429291, 3657324, 5316210, 4680313, 3756697, 5949820, 5268209, 4685384, 4284995, 3783679, 3954066, 3515489, 4653612, 5879525, 5531160
};
__device__ __constant__ unsigned ll m2[16*16] = {
5402623, 3995627, 6743696, 3542762, 4575592, 3695284, 3918622, 4538491, 6181070, 2512172, 4698104, 4163423, 4292282, 4453561, 1477165, 4966421, 4736401, 4810828, 5344335, 4837246, 4286233, 2634311, 4147100, 1921287, 6340262, 4113615, 3795215, 4211793, 2113035, 2556419, 2938045, 5122808, 4298089, 4512884, 3691197, 5534985, 4851922, 2772637, 2236804, 6120396, 2929315, 4143837, 3787446, 3239459, 4775456, 865982, 4496660, 1703666, 1599063, 4549671, 3501246, 1249567, 3323392, 4137217, 4195107, 6887026, 4291745, 5286819, 4357805, 2681316, 3140604, 4414659, 2713437, 4684680, 4623493, 2717830, 5068866, 5504463, 4723859, 6560104, 3436816, 5727689, 1648325, 4410464, 3260375, 3527822, 3801152, 5313457, 3446586, 5860645, 4737773, 5422197, 2731218, 4057691, 4550777, 5336448, 3092465, 2570986, 4040807, 3075376, 4867819, 4488883, 5773716, 3522895, 2814470, 5650927, 4377915, 4178781, 4884170, 3743092, 2828557, 4740730, 3232547, 4097158, 4680480, 5160081, 4033217, 2703093, 1601640, 5648239, 3977663, 5292342, 1858079, 5437452, 5778958, 3868468, 4060999, 3947463, 5609988, 4810874, 5726724, 4889048, 3297829, 4738813, 6375997, 3406592, 5284049, 4546058, 5626721, 4157315, 6594087, 6505334, 7104890, 3101450, 4471290, 4693939, 1906608, 6790147, 2749917, 4280580, 4847134, 5819494, 2243550, 2962854, 1061216, 3903053, 5066876, 4876405, 4163034, 4753225, 6430668, 4729571, 4429114, 5100039, 6968806, 4146280, 4409023, 4459757, 2614041, 5123350, 4132817, 4575138, 3217081, 4959027, 3843245, 6940258, 4267578, 3587797, 3931136, 5217963, 6225758, 2514754, 4230712, 5868142, 4260445, 4583537, 4107203, 1696201, 3402111, 4265050, 5518600, 5989181, 1619745, 3704475, 4369304, 2739916, 4914357, 3048664, 5007979, 4977233, 2376675, 3990140, 1889822, 3240595, 2616037, 2789849, 4489124, 4465373, 4793428, 6241130, 674170, 3887638, 3952595, 3276410, 3403888, 3426655, 5523518, 3812274, 5021758, 4478267, 3835431, 2826298, 3810009, 3389970, 4590227, 5646501, 698792, 4943485, 3322046, 3383689, 2234016, 2160407, 
2022331, 5654802, 5180190, 3407774, 3774041, 4347928, 5845572, 2056892, 6877570, 2917990, 3144028, 2784578, 4687680, 4418303, 4558507, 5042387, 3523901, 4224349, 3338556, 5510521, 2140311, 3958657, 4013730, 1295751, 3730229, 1555011, 4265546, 4450370, 2322128, 1232047, 5340683, 3901175, 5102033, 4321191
};
// __device__ __constant__ ll test1[4] = {2, 1, 2, 1};
// __device__ __constant__ ll test2[4] = {1, 1, 2, 1};
// __global__ void Matrix_Multiplication_AB_Kernel_Poorman(const unsigned ll* Ae,const unsigned ll* Be, unsigned ll* Ce,const int Am,const int An,const int Bn)
// {
// int i=blockIdx.x*blockDim.x+threadIdx.x;
// int j=blockIdx.y*blockDim.y+threadIdx.y;
// ll val=0;
// for(int k=0;k<An;k++)
// val+=Ae[i*An+k]*Be[k*Bn+j];
// Ce[i*Bn+j]=val;
// }
// Fixed-size 16x16 matrix multiply: Ce = Ae * Be (element-wise products and
// sums wrap mod 2^64).  Must be launched as a single block of 16x16 threads;
// Am/An/Bn are accepted for interface compatibility but the tile edge is
// hard-wired to 16.
__global__ void MM_Key(const unsigned ll* Ae, const unsigned ll* Be, unsigned ll* Ce,const int Am,const int An,const int Bn)
{
    __shared__ unsigned ll aTile[16][16];
    __shared__ unsigned ll bTile[16][16];
    const int row = threadIdx.y;
    const int col = threadIdx.x;
    const int flat = row * 16 + col;
    // Stage both operands into shared memory, one element per thread.
    aTile[row][col] = Ae[flat];
    bTile[row][col] = Be[flat];
    __syncthreads();
    // Accumulate the dot product in a register (no cross-thread traffic needed).
    unsigned ll acc = 0;
#pragma unroll
    for (int k = 0; k < 16; k++) {
        acc += aTile[row][k] * bTile[k][col];
    }
    // Each thread owns exactly one output element.
    Ce[flat] = acc;
}
// Computes the 16x16 product Ae * Be and atomically accumulates the sum of
// all 256 product elements into sum[0] (a checksum of the product matrix).
// Must be launched as a single block of 16x16 threads.
// PRECONDITION: the caller must zero sum[0] before the launch — cudaMalloc
// does not initialize memory.
__global__ void MM_Sum(const unsigned ll* Ae, const unsigned ll* Be, unsigned ll* sum,const int Am,const int An,const int Bn)
{
// initialize memory
__shared__ unsigned ll a_shared[16][16];
__shared__ unsigned ll b_shared[16][16];
__shared__ unsigned ll c_shared[16][16];
int c_idx = threadIdx.y * 16 + threadIdx.x;
c_shared[threadIdx.y][threadIdx.x] = 0; // set everything to zero just the first time
a_shared[threadIdx.y][threadIdx.x] = Ae[c_idx];
b_shared[threadIdx.y][threadIdx.x] = Be[c_idx];
__syncthreads();
// lmao loop unrolling time my dudes
#pragma unroll
for (int i = 0; i < 16; i++) {
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][i] * b_shared[i][threadIdx.x];
}
__syncthreads();
// save to global
// One atomicAdd per thread; contention on a single address is acceptable
// for a one-off 256-thread launch.
atomicAdd(&sum[0], c_shared[threadIdx.y][threadIdx.x]);
}
// __host__ void print_1D( ll* data)
// {
// for (int i = 0; i < 16; i++) {
// std::cout << data[i] << ' ';
// }
// std::cout << std::endl;
// }
// Computes a checksum of m1 * m2 (constant 16x16 matrices) on the GPU and
// compares it against the expected value printed alongside.
int main()
{
    // Resolve device addresses of the __constant__ matrices so they can be
    // passed to the kernel as ordinary pointers.
    unsigned ll* m1_raw = nullptr;
    cudaGetSymbolAddress((void**)&m1_raw, m1);
    unsigned ll* m2_raw = nullptr;
    cudaGetSymbolAddress((void**)&m2_raw, m2);
    unsigned ll* d;
    cudaMalloc((void**)&d, sizeof(unsigned ll));
    // BUG FIX: the accumulator must start at zero.  cudaMalloc does not
    // initialize memory, so MM_Sum's atomicAdd was summing into garbage.
    cudaMemset(d, 0, sizeof(unsigned ll));
    MM_Sum<<<1,dim3(16,16)>>>(m1_raw,m2_raw,d,16,16,16);
    // Blocking copy back — also synchronizes with the kernel.
    unsigned ll* d_host = (unsigned ll*)malloc(sizeof(unsigned ll));
    cudaMemcpy(d_host, d, sizeof(unsigned ll), cudaMemcpyDeviceToHost);
    std::cout<<*d_host<<" calculated"<<std::endl;
    std::cout<<920403722748280569-845690870767227654<<" expected"<<std::endl;
    // Release device and host buffers (previously leaked).
    cudaFree(d);
    free(d_host);
}
|
19,143 | #include <assert.h>
#include <cstdio>
#include <random>
using namespace std;
#define CUDA_CALL(F, ...)\
if((F(__VA_ARGS__)) != cudaSuccess){\
cudaError_t e = cudaGetLastError();\
printf("CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));\
return(EXIT_FAILURE);\
}
#define CURAND_CALL(F, ...)\
if((F(__VA_ARGS__)) != CURAND_STATUS_SUCCESS){\
cudaError_t e = cudaGetLastError();\
if(e != cudaSuccess){\
printf("CuRAND failure %s:%d: '%s'\n",__FILE__,__LINE__, cudaGetErrorString(e));\
}\
return(EXIT_FAILURE);\
}
#define PRINT_1D_I(A, S)\
printf("[");\
for(int i = 0; i < S; i++){\
printf("%d, ", A[i]);\
}\
printf("]\n");
#define PRINT_1D_F(A, S)\
printf("[");\
for(int i = 0; i < S; i++){\
printf("%f, ", A[i]);\
}\
printf("]\n");
#define FILL_1D(A, S, V)\
for(int i = 0; i < S; i++){\
A[i] = V;\
}
#define PRINT_FLAT2D(A, WIDTH, HEIGHT)\
printf("[\n");\
for(int i = 0; i < WIDTH; i++){\
printf("[");\
for(int j = 0; j < HEIGHT; j++){\
printf("%f, ", A[i + j * WIDTH]);\
}\
printf("]\n");\
}\
printf("]\n");
// Per-block partial aggregate for k-means: running sum of the member
// samples assigned to a cluster and how many samples contributed.
struct Cluster{
float sum;
int count;
};
// Squared distance between two scalars — the square root is omitted since
// it is monotonic and unnecessary for nearest-center comparisons.
__device__ float euclidian_dist(const float a, const float b){
    const float diff = a - b;
    return diff * diff;
}
// Assigns each sample src[pos] the index (into clusters) of the nearest
// cluster center, writing the result to labels[pos].  One thread per sample.
__global__ void relabel_k(const float* src, const float* clusters, int n, int nClusters, int* labels){
    int pos = threadIdx.x + blockIdx.x * blockDim.x;
    if(pos < n){
        // Start from +infinity so the first cluster always wins.  The original
        // seeded this with 1.0f, which silently labels any point farther than
        // 1.0 from every center as cluster 0; FLT_MAX generalizes the kernel
        // to arbitrary data ranges and is identical for data in [0, 1).
        float minDist = 3.402823466e+38f; // FLT_MAX
        int clusterIndex = 0;
        for(int c = 0; c < nClusters; c++){
            float dist = euclidian_dist(src[pos], clusters[c]);
            if(dist <= minDist){
                clusterIndex = c;
                minDist = dist;
            }
        }
        labels[pos] = clusterIndex;
    }
}
// Per-block partial aggregation for one cluster: sums the samples labeled
// clusterIndex (and counts them) within this block, writing one Cluster
// record per block to dst[blockIdx.x].
// Preconditions: dynamic shared memory of blockDim.x * sizeof(Cluster);
// blockDim.x must be a power of two for the halving reduction.
// NOTE: the printf is debug output and serializes heavily — remove for
// production runs.
__global__ void calculateClusters_k(const float* src, const int* labels, int n, int clusterIndex, Cluster* dst){
extern __shared__ Cluster _clusters[];
int pos = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
// Zero every slot first so out-of-range / other-cluster threads contribute 0.
_clusters[tid] = Cluster();
_clusters[tid].sum = 0.0f;
_clusters[tid].count = 0;
if(pos < n && labels[pos] == clusterIndex){
_clusters[tid].sum = src[pos];
_clusters[tid].count = 1;
}
__syncthreads();
// Tree reduction over sum and count simultaneously.
for(unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2){
if(threadIdx.x < stride){
_clusters[tid].sum += _clusters[tid + stride].sum;
_clusters[tid].count += _clusters[tid + stride].count;
}
__syncthreads();
}
__syncthreads();
if(threadIdx.x == 0){
dst[blockIdx.x].sum = _clusters[0].sum;
dst[blockIdx.x].count = _clusters[0].count;
printf("BlockId = %d, Sum = %f, Count = %d\n", blockIdx.x, _clusters[0].sum, _clusters[0].count);
}
}
// Final reduction for one cluster: folds the per-block Cluster partials in
// src into a single sum/count and writes the new center (mean) to
// dst[clusterIndex].  Launched as one block whose blockDim.x equals the
// number of partials (must be a power of two); dynamic shared memory of
// blockDim.x * sizeof(Cluster) is required.  `n` is unused here.
// NOTE: the printf is debug output.
__global__ void findCenters_k(const Cluster* src, int n, int clusterIndex, float* dst){
extern __shared__ Cluster _clusters[];
int pos = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
_clusters[tid] = src[pos];
__syncthreads();
// Tree reduction over sum and count.
for(unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2){
if(threadIdx.x < stride){
_clusters[tid].sum += _clusters[tid + stride].sum;
_clusters[tid].count += _clusters[tid + stride].count;
}
__syncthreads();
}
__syncthreads();
if(tid == 0){
printf("Cluster = %d, Sum = %f, Count = %d\n", clusterIndex, _clusters[0].sum, _clusters[0].count);
// Guard against empty clusters to avoid division by zero.
dst[clusterIndex] = _clusters[0].count > 0 ? _clusters[0].sum / _clusters[0].count : 0.0f;
}
}
// 1-D k-means on N random samples in [0, 1): MAX_IT iterations of GPU
// relabel + per-cluster center recomputation, then a host-side sanity check
// that every sample received exactly one label.
int main(){
    random_device rd;
    mt19937 gen(rd());
    uniform_real_distribution<> dis(0.0, 1.0);
    const int N = 512 * 512;
    const int N_CLUSTERS = 5;
    const int BLOCKDIM = 1024;
    const int MAX_IT = 10;
    // Number of thread blocks; also the number of per-block Cluster partials.
    const int blockSize = (N + BLOCKDIM - 1) / BLOCKDIM;
    float* src = new float[N];
    int* labels = new int[N];
    float* centers = new float[N_CLUSTERS];
    float* src_d, *centers_d;
    Cluster* clusters_d;
    int* labels_d;
    CUDA_CALL(cudaMalloc, (void**)&src_d, sizeof(float) * N);
    CUDA_CALL(cudaMalloc, (void**)&labels_d, sizeof(int) * N);
    // Fix: repaired mojibake "¢ers_d" (HTML-mangled "&centers_d").
    CUDA_CALL(cudaMalloc, (void**)&centers_d, sizeof(float) * N_CLUSTERS);
    CUDA_CALL(cudaMalloc, (void**)&clusters_d, sizeof(struct Cluster) * (blockSize));
    // FILL_1D re-evaluates dis(gen) per element (textual macro), so every
    // entry gets an independent draw.
    FILL_1D(src, N, dis(gen));
    FILL_1D(centers, N_CLUSTERS, dis(gen));
    CUDA_CALL(cudaMemcpy, src_d, src, sizeof(float) * N, cudaMemcpyHostToDevice);
    CUDA_CALL(cudaMemcpy, centers_d, centers, sizeof(float) * N_CLUSTERS, cudaMemcpyHostToDevice);
    for(int it = 0; it < MAX_IT; it++){
        relabel_k<<<blockSize, BLOCKDIM>>>(src_d, centers_d, N, N_CLUSTERS, labels_d);
        for(int c = 0; c < N_CLUSTERS; c++){
            // Pass 1: per-block partial sums/counts for cluster c.
            calculateClusters_k<<<blockSize, BLOCKDIM, sizeof(struct Cluster) * BLOCKDIM>>>(src_d, labels_d, N, c, clusters_d);
            // Pass 2: single block folds the partials into the new center.
            findCenters_k<<<1, blockSize, sizeof(struct Cluster) * blockSize>>>(clusters_d, N, c, centers_d);
        }
    }
    cudaDeviceSynchronize();
    CUDA_CALL(cudaMemcpy, labels, labels_d, sizeof(int) * N, cudaMemcpyDeviceToHost);
    CUDA_CALL(cudaMemcpy, centers, centers_d, sizeof(float) * N_CLUSTERS, cudaMemcpyDeviceToHost);
    printf("Blocks = %d\n", blockSize);
    // PRINT_1D_I(labels, N);
    PRINT_1D_F(centers, N_CLUSTERS);
    // Verify the labeling is a partition of the samples.
    int* freq = new int[N_CLUSTERS];
    memset(freq, 0, sizeof(int) * N_CLUSTERS);
    for(int i = 0; i < N; i++){
        freq[labels[i]]++;
    }
    int total = 0;
    for(int i = 0; i < N_CLUSTERS; i++)
        total += freq[i];
    assert(total == N);
    // Release everything (freq was previously leaked).
    delete[] freq;
    CUDA_CALL(cudaFree, src_d);
    CUDA_CALL(cudaFree, clusters_d);
    CUDA_CALL(cudaFree, centers_d);
    CUDA_CALL(cudaFree, labels_d);
    delete[] src;
    delete[] labels;
    delete[] centers;
    return EXIT_SUCCESS;
}
|
19,144 | #include <math.h>
// Flattens a 2-D grid of 2-D blocks into a single global thread index.
__device__ size_t calculateGlobalIndex() {
    // Which block are we?
    size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
    // Which thread are we within the block?
    // BUG FIX: the intra-block index must use threadIdx.y, not blockIdx.y —
    // the original mixed the block coordinate into the thread offset and
    // produced wrong (and colliding) indices for any 2-D launch.
    size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
    // How big is each block?
    size_t const threadsPerBlock = blockDim.x * blockDim.y;
    // Which thread are we overall?
    return localThreadIdx + globalBlockIndex * threadsPerBlock;
}
// Directional weight: wd[i] = dot(center tangent, neighbour tangent i).
// x holds the center tangent as (x[0], x[1]); y packs the neighbour tangents
// as up to 9 x-components followed by 9 y-components (fixed stride of 9).
__device__ void dire_weight(double *wd, double *x, double *y, size_t len) {
    const double cx = x[0];
    const double cy = x[1];
    for (size_t i = 0; i < len; ++i) {
        wd[i] = cx * y[i] + cy * y[9 + i];
    }
}
// Magnitude weight: wm[i] = (1 + (neighbour_mag_i - center_mag)) / 2, so
// neighbours with larger gradient magnitude than the center weigh more.
__device__ void mag_weight(double *wm, double x, double *y, size_t len) {
    for (size_t i = 0; i < len; ++i) {
        wm[i] = 0.5 * (1.0 + y[i] - x);
    }
}
/*__device__ double* local_win(size_t pos, size_t ih, size_t iw, double *im) {
double lwin[9];
int count = 0;
int locator = pos/iw;
// 1,0,0,
// 0,0,0,
// 0,0,0.
if( (pos-iw-1)>=0 && ((pos-iw-1)/iw)>=0 ) {
lwin[count] = im[pos-iw-1];
count++;
}
// 0,0,0,
// 1,0,0,
// 0,0,0.
if( (pos-1)>=0 && ((pos-1)/iw)>=0 ) {
lwin[count] = im[pos-1];
count++;
}
// 0,0,0,
// 0,0,0,
// 1,0,0.
if((pos+iw-1)>0) {
lwin[count] = im[pos+iw-1];
count++;
}
// 0,1,0,
// 0,0,0,
// 0,0,0.
if((pos-iw)>0) {
lwin[count] = im[pos-iw];
count++;
}
// 0,0,0,
// 0,1,0,
// 0,0,0.
lwin[count] = im[pos];
count++;
}
*/
// Edge Tangent Flow smoothing pass: for each pixel, gathers the 3x3
// neighbourhood of tangent vectors (tx, ty) and gradient magnitudes (gmag),
// combines them with magnitude and direction weights, normalizes the result,
// and writes the smoothed tangent to (xout, yout) and its magnitude to magout.
// One thread per pixel over a height x width image; `im` is unused here.
__global__ void etfKernel(
double *xout, double *yout, double *magout,
const double *tx, const double *ty,
const double *im, const double *gmag,
int height, int width) {
// size_t const globalThreadIdx = calculateGlobalIndex();
// Calculate pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Only execute valid pixels
if (x>=width || y>=height) {
return;
}
// Center-pixel tangent and gradient magnitude.
double ctrx = tx[y*width+x];
double ctry = ty[y*width+x];
double ctrgmag = gmag[y*width+x];
// initialization
// double win[9] = {0.0};
double win_tx[9] = {0.0};
double win_ty[9] = {0.0};
double win_gmag[9] = {0.0};
int count = 0;
// get local win
// Gather the in-bounds 3x3 neighbourhood; `count` <= 9 tracks how many
// neighbours were actually collected (fewer at image borders).
for (int j = -1; j < 2; j++) {
int wy = y+j;
if (wy>=0 && wy<height){
for (int i = -1; i < 2; i++) {
int wx = x+i;
if (wx>=0 && wx<width){
// win[count] = im[wy*width+wx];
win_tx[count] = tx[wy*width+wx];
win_ty[count] = ty[wy*width+wx];
win_gmag[count] = gmag[wy*width+wx];
count++;
}
}
}
}
// etf operation
double wm[9] = {0.0};
mag_weight(wm, ctrgmag, win_gmag, count);
double ctrxy[2];
ctrxy[0] = ctrx;
ctrxy[1] = ctry;
// Pack neighbour tangents in the layout dire_weight expects:
// x-components at [0..8], y-components at [9..17].
double winxy[18];
for(int i = 0; i < count; i++){
winxy[i] = win_tx[i];
winxy[i+9] = win_ty[i];
}
double wd[9] = {0.0};
dire_weight(wd, ctrxy, winxy, count);
// Weighted sum of neighbour tangents.
double sum_tx = 0.0;
double sum_ty = 0.0;
for (int i = 0; i < count; i++){
sum_tx += win_tx[i]*wm[i]*wd[i];
sum_ty += win_ty[i]*wm[i]*wd[i];
}
// Normalize to a unit tangent; keep the pre-normalization magnitude.
double tmpgmag = sqrt(sum_tx*sum_tx+sum_ty*sum_ty);
if (tmpgmag != 0){
sum_tx /= tmpgmag;
sum_ty /= tmpgmag;
}
else {
sum_tx = 0;
sum_ty = 0;
}
/*
for (int i = 0; i < count; i++){
xout[y*width+x] += wm[i];
yout[y*width+x] += wd[i];
}
*/
xout[y*width+x] = sum_tx;
yout[y*width+x] = sum_ty;
magout[y*width+x] = tmpgmag;
// NOTE(review): this trailing barrier has no effect (no shared memory and
// nothing is read after it) — harmless but unnecessary.
__syncthreads();
}
|
19,145 | /******************************************************************
File : lcsBigBlockInitializationForVelocities.cu
Author : Mingcheng Chen
Last Update : January 31st, 2013
*******************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 512
// Gathers per-point start/end velocities (3 doubles each) for one "big
// block": block b copies the velocities of its points, identified through
// blockedGlobalPointIDs, into the contiguous output regions
// startVelocitiesForBig / endVelocitiesForBig.  One thread block per
// interesting block; threads stride over the 3*numOfPoints components.
__global__ void BigBlockInitializationForVelocitiesKernel(double *globalStartVelocities,
double *globalEndVelocities,
int *blockedGlobalPointIDs,
int *startOffsetInPoint,
double *startVelocitiesForBig,
double *endVelocitiesForBig
) {
// Get number of threads in a work group
int numOfThreads = blockDim.x;
// Get local thread ID
int localID = threadIdx.x;
// Get interesting block ID of the current big block
int interestingBlockID = blockIdx.x;
// Declare some work arrays
double *gStartVelocities;
double *gEndVelocities;
// startOffsetInPoint is a prefix-sum table: [offset_b, offset_{b+1}) is the
// point range owned by block b.
int startPoint = startOffsetInPoint[interestingBlockID];
int numOfPoints = startOffsetInPoint[interestingBlockID + 1] - startPoint;
// Initialize startVelocities and endVelocities
gStartVelocities = startVelocitiesForBig + startPoint * 3;
gEndVelocities = endVelocitiesForBig + startPoint * 3;
// Strided loop over every (point, dimension) component.
for (int i = localID; i < numOfPoints * 3; i += numOfThreads) {
int localPointID = i / 3;
int dimensionID = i % 3;
int globalPointID = blockedGlobalPointIDs[startPoint + localPointID];
gStartVelocities[i] = globalStartVelocities[globalPointID * 3 + dimensionID];
gEndVelocities[i] = globalEndVelocities[globalPointID * 3 + dimensionID];
}
}
extern "C"
// Host wrapper: launches one thread block of BLOCK_SIZE threads per
// interesting block to gather its start/end velocities, then synchronizes
// and reports any CUDA error.
void BigBlockInitializationForVelocities(double *globalStartVelocities,
                                         double *globalEndVelocities,
                                         int *blockedGlobalPointIDs,
                                         int *startOffsetInPoint,
                                         double *startVelocitiesForBig,
                                         double *endVelocitiesForBig,
                                         int numOfInterestingBlocks
                                         ) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid(numOfInterestingBlocks, 1, 1);
    BigBlockInitializationForVelocitiesKernel<<<dimGrid, dimBlock>>>(globalStartVelocities, globalEndVelocities, blockedGlobalPointIDs,
                                                                     startOffsetInPoint, startVelocitiesForBig, endVelocitiesForBig);
    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // BUG FIX: the original called cudaGetErrorString() but discarded the
        // message and then exit(0), reporting nothing and signalling success.
        fprintf(stderr, "BigBlockInitializationForVelocities: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
|
19,146 | #include "includes.h"
// Empty kernel: performs no work.  Useful for measuring bare launch
// overhead or warming up the CUDA context.
__global__ void nothingKernel() {}
19,147 | #include "includes.h"
// 1-D stencil: out[g] = sum of in[g .. g + 2*RADIUS] — i.e. each output
// element is the sum of its (2*RADIUS + 1)-wide input window.  RADIUS is a
// file-level macro defined elsewhere.  `in` must carry a halo: it needs
// 2*RADIUS more elements than `out`, with the payload starting at in[RADIUS].
// No bounds check: the grid must match the output size exactly.
__global__ void stencil_1d(int *in, int *out)
{
// blockDim is 3-dimensional vector storing block grid dimensions
// index of a thread across all threads + RADIUS
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int result = 0;
// Accumulate the window centered on gindex.
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += in[gindex + offset];
// Store the result
out[gindex - RADIUS] = result;
}
19,148 | #include "includes.h"
// For each of the nwl walkers, picks one candidate distance out of didi11:
// didi11 is laid out as [candidate][walker] with stride nwl, and kex[w]
// selects which candidate row walker w uses.  Result goes to didi1[w].
__global__ void chooseDistance ( const int nwl, const int *kex, const float *didi11, float *didi1 ) {
    const int w = blockIdx.x * blockDim.x + threadIdx.x;
    if ( w >= nwl ) return;   // guard the grid tail
    didi1[w] = didi11[w + kex[w] * nwl];
}
19,149 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define TILE_WIDTH 32
#define COMMENT "Centrist_GPU"
#define RGB_COMPONENT_COLOR 255
// One 24-bit RGB pixel of a binary (P6) PPM image.
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
// A PPM image: x = width, y = height, data = row-major pixel buffer.
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double rtclock()
{
    struct timezone tzp;
    struct timeval tp;
    const int rc = gettimeofday(&tp, &tzp);
    if (rc != 0) printf("Error return from gettimeofday: %d", rc);
    return tp.tv_sec + tp.tv_usec * 1.0e-6;
}
// Loads a binary (P6) PPM file with 8-bit components.  Exits the process
// with an error message on any malformed input or allocation failure.
// Caller owns the returned image and its pixel buffer.
static PPMImage *readPPM(const char *filename) {
	char buff[16];
	PPMImage *img;
	FILE *fp;
	int c, rgb_comp_color;
	fp = fopen(filename, "rb");
	if (!fp) {
		fprintf(stderr, "Unable to open file '%s'\n", filename);
		exit(1);
	}
	// Magic number: must be "P6" (binary RGB).
	if (!fgets(buff, sizeof(buff), fp)) {
		perror(filename);
		exit(1);
	}
	if (buff[0] != 'P' || buff[1] != '6') {
		fprintf(stderr, "Invalid image format (must be 'P6')\n");
		exit(1);
	}
	img = (PPMImage *) malloc(sizeof(PPMImage));
	if (!img) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	// Skip '#' comment lines between the magic number and the dimensions.
	c = getc(fp);
	while (c == '#') {
		while (getc(fp) != '\n')
			;
		c = getc(fp);
	}
	ungetc(c, fp);
	if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
		fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
		exit(1);
	}
	if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
		fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
				filename);
		exit(1);
	}
	if (rgb_comp_color != RGB_COMPONENT_COLOR) {
		fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
		exit(1);
	}
	// Consume the single whitespace byte before the binary pixel data.
	while (fgetc(fp) != '\n')
		;
	img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
	// BUG FIX: the original re-checked `img` here instead of the freshly
	// allocated pixel buffer, so a failed malloc went undetected.
	if (!img->data) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
		fprintf(stderr, "Error loading image '%s'\n", filename);
		exit(1);
	}
	fclose(fp);
	return img;
}
// Writes img to standard output as a binary (P6) PPM with a comment header.
// NOTE(review): this closes stdout, so nothing can be printed afterwards —
// presumably intentional (flushes the binary stream), but confirm callers
// only invoke it last.
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
//create a thread per pixel
//create a thread per pixel
// CENTRIST (census transform) kernel: each pixel is compared against the
// mean of its 3x3 forward window (itself and the 8 pixels to its right/
// below); the 9 comparison bits form a value in [0, 511] written to the
// output pixel and accumulated into a 512-bin histogram.
// Layout: TILE_WIDTH x TILE_WIDTH thread tiles; a per-block shared
// histogram is merged into the global one at the end.  Pixels inside the
// tile are read from the shared grayscale cache; pixels that fall past the
// tile edge are recomputed from global memory.
// NOTE(review): __syncthreads() calls below sit inside `if(col < columns &&
// row < rows)` — safe only when the image dimensions are multiples of
// TILE_WIDTH so the whole block takes the same branch; confirm for other
// inputs.
__global__ void mod_CENTRIST(PPMPixel *image_out, PPMPixel *image_cp, int columns, int rows, int *hist, int hist_len) {
int col = TILE_WIDTH * blockIdx.x + threadIdx.x;
int row = TILE_WIDTH * blockIdx.y + threadIdx.y;
__shared__ int hist_private[512];
int hist_index = (threadIdx.y*TILE_WIDTH + threadIdx.x); //get index in shared histogram
// Zero the block-local histogram (one bin per thread, first 512 threads).
if(hist_index < hist_len) hist_private[hist_index] = 0;
__syncthreads();
if(col < columns && row < rows)
{
//create and copy small chunks to shared memory
__shared__ unsigned char image_cp_private[TILE_WIDTH][TILE_WIDTH];
//convert to grayscale
int img_index = row * columns + col; //get index in original image
// Integer luma (ITU-R 601 weights scaled by 1000).
int grayscale = (image_cp[img_index].red*299 + image_cp[img_index].green*587 + image_cp[img_index].blue*114)/1000; //avoid float point errors
image_cp_private[threadIdx.y][threadIdx.x] = grayscale;
__syncthreads();
if(col < columns - 2 && row < rows - 2) //ignore first/last row/column
{
int r, c, rr, cc;
float mean = 0.0;
// Pass 1: mean of the 3x3 forward window (shared cache when in-tile,
// global recompute when the window crosses the tile edge).
for(r = threadIdx.y, rr = row; r <= threadIdx.y + 2; r++, rr++)
for(c = threadIdx.x , cc = col; c <= threadIdx.x + 2; c++, cc++)
{
if(r < TILE_WIDTH && c < TILE_WIDTH)
{
mean += image_cp_private[r][c];
}
else
{
int grayscale_neigh = (image_cp[rr*columns + cc].red*299 + image_cp[rr*columns + cc].green*587 + image_cp[rr*columns + cc].blue*114)/1000;
mean += grayscale_neigh;
}
}
mean /= 9.0;
// Pass 2: census bits, MSB-first over the same 9-pixel window.
int value = 0, k = 8;
for(r = threadIdx.y, rr = row ; r <= threadIdx.y + 2; r++, rr++)
for(c = threadIdx.x, cc = col ; c <= threadIdx.x + 2; c++, cc++)
{
if(r < TILE_WIDTH && c < TILE_WIDTH)
{
if(1.0*image_cp_private[r][c] >= mean)
value |= 1<<k;
}
else
{
int grayscale_neigh = (image_cp[rr*columns + cc].red*299 + image_cp[rr*columns + cc].green*587 + image_cp[rr*columns + cc].blue*114)/1000;
if(grayscale_neigh >= mean)
value |= 1<<k;
}
k--;
}
int img_out_ind = row * (columns - 2) + col; //get index in ouput original
image_out[img_out_ind].red = image_out[img_out_ind].blue = image_out[img_out_ind].green = value;
atomicAdd(&(hist_private[value]), 1);
}
__syncthreads();
// Thread 0 of each block folds the block-local histogram into the global one.
if(hist_index == 0)
{
for(int i = 0; i < hist_len; i++)
atomicAdd(&(hist[i]), hist_private[i]); //init shared histogram
}
}
}
// Usage: ./centrist <image.ppm> — runs the CENTRIST kernel on the image and
// prints allocation/copy/kernel timings.
int main(int argc, char *argv[]) {
	if( argc != 2 ) {
		printf("Too many or no one arguments supplied.\n");
		// BUG FIX: the original fell through here and dereferenced argv[1].
		return 1;
	}
	double t_start, t_end;
	char *filename = argv[1]; //Recebendo o arquivo!;
	PPMImage *image = readPPM(filename);
	PPMImage *image_output = readPPM(filename);
	int *hist;
	//device data
	PPMPixel *d_image_output;
	PPMPixel *d_image_copy;
	int *d_hist;
	//total excecution time
	double offload=0.0 , kernel = 0.0;
	int i_size = sizeof(PPMPixel) * image->x * image->y;
	int hist_len = 512;
	int hist_size = sizeof(int)*hist_len;
	hist =(int *)malloc(hist_size);
	int i;
	for(i = 0; i < hist_len; i++) hist[i] = 0;
	// Allocate space for device copies of image and h
	t_start = rtclock();
	cudaMalloc(&d_image_output, i_size);
	cudaMalloc(&d_image_copy, i_size);
	cudaMalloc(&d_hist, hist_size);
	t_end = rtclock();
	fprintf(stdout, "CudaMalloc %0.6lfs\n", t_end - t_start);
	offload += t_end - t_start;
	//copy inputs to device
	t_start = rtclock();
	cudaMemcpy(d_image_output, image_output->data, i_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_image_copy, image->data, i_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_hist, hist, hist_size, cudaMemcpyHostToDevice);
	t_end = rtclock();
	fprintf(stdout, "CopyToDevice %0.6lfs\n", t_end - t_start);
	offload += t_end - t_start;
	//set grids size
	dim3 dimGrid(ceil((float)image -> x / TILE_WIDTH), ceil((float)image -> y / TILE_WIDTH), 1);
	dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
	t_start = rtclock();
	mod_CENTRIST<<<dimGrid, dimBlock>>>(d_image_output, d_image_copy, image-> x, image -> y, d_hist, hist_len);
	cudaDeviceSynchronize();
	t_end = rtclock();
	fprintf(stdout, "Kernel %0.6lfs\n", t_end - t_start);
	kernel += t_end - t_start;
	t_start = rtclock();
	cudaMemcpy(image_output->data, d_image_output, i_size, cudaMemcpyDeviceToHost);
	cudaMemcpy(hist, d_hist, hist_size, cudaMemcpyDeviceToHost);
	t_end = rtclock();
	fprintf(stdout, "CopyFromDevice %0.6lfs\n", t_end - t_start);
	offload += t_end - t_start;
	fprintf(stdout, "Offload %0.6lfs\n", offload);
	fprintf(stdout, "CudaTotal %0.6lfs\n", offload + kernel);
	// The census transform shrinks the valid region by the 2-pixel border.
	image_output->x -= 2;
	image_output->y -= 2;
	float dim = (float)((image_output->x) * (image_output->y));
	//writePPM(image_output);
	//for(i=0; i < hist_len; i++) printf("%.4f ", hist[i]/dim);
	(void)dim; // kept for the commented-out normalized-histogram dump above
	// Free host resources (pixel buffers and hist were previously leaked).
	free(image->data);
	free(image);
	free(image_output->data);
	free(image_output);
	free(hist);
	cudaFree(d_image_output), cudaFree(d_image_copy), cudaFree(d_hist);
	return 0;
}
|
19,150 | #include <stdio.h>
__global__
// Batch primality sieve step: for i in [0, Nb), sets ans[i] = true iff
// (n + i) is divisible by one of the first np known primes, testing divisors
// only up to sqrt(n + i).  knownprimes must be sorted ascending.
void areDivisible(int n, int Nb, int np, int *knownprimes, bool *ans)
{
	int i = blockIdx.x*blockDim.x + threadIdx.x; // between [0 and Nb[
	if (i<Nb)
	{
		int ni = n+i; // number to be tested
		ans[i] = false;
		for (int j=0; j<np; j++)
		{
			int p = knownprimes[j];
			// Improvement: stop at the first divisor found — the original
			// kept scanning all remaining primes to no effect.
			if (ni%p==0) { ans[i] = true; break; }
			// check we do not test further than sqrt(ni)
			if (p*p>ni) break;
		}
	}
}
// Returns true iff n is divisible by one of the first np entries of primes
// (sorted ascending), checking divisors only up to sqrt(n).
bool isDivisible(int n, int np, int *primes)
{
	for (int i = 0; i < np; ++i)
	{
		const int p = primes[i];
		if (n % p == 0)
			return true;
		// p exceeds sqrt(n): no larger prime can divide n.
		if (p * p > n)
			return false;
	}
	return false;
}
// Computes the first Np primes: a serial sieve seeds primes below Nb, then
// the GPU tests batches of Nb consecutive numbers against all primes found
// so far.  Correctness relies on Nb >= sqrt(n + Nb) for every batch, so the
// known primes always cover the divisors needed — holds for these sizes.
int main(void)
{
int Np = 1<<21; // number of primes to compute
int Nb = 1<<17; // batch size of numbers tested at once on gpu
int *primes, *d_primes;
bool *ans, *d_ans;
primes = (int*) malloc(Np*sizeof(int));
ans = (bool*) malloc(Nb*sizeof(bool));
cudaMalloc(&d_primes, Np*sizeof(int));
cudaMalloc(&d_ans, Nb*sizeof(bool));
// init
int n=2;
int np=0;
// serial search for primes between 0 and Nb
while(n<Nb)
{
// test if n is prime by checking division by previous primes
if (!isDivisible(n,np,primes))
{
primes[np] = n;
np ++;
}
// next number
n ++;
}
// parallel search for remaining primes
while(np<Np)
{
// run divisibility tests on the batch from [n to n+Nb[
cudaMemcpy(d_primes, primes, np*sizeof(int), cudaMemcpyHostToDevice);
areDivisible<<<(Nb+255)/256, 256>>>(n, Nb, np, d_primes, d_ans);
// Blocking copy — also synchronizes with the kernel above.
cudaMemcpy(ans, d_ans, Nb*sizeof(bool), cudaMemcpyDeviceToHost);
// analyse results
// Numbers not divisible by any known prime are new primes (in order).
for (int i=0; i<Nb; i++) if (!ans[i] && np<Np)
{
primes[np] = n+i;
np ++;
}
// increment
n += Nb;
}
for (int i=0; i<Np; i++) printf("%d\n", primes[i]);
cudaFree(d_primes);
cudaFree(d_ans);
free(primes);
free(ans);
}
|
19,151 | #include <random>
#include <iostream>
__global__ void scaleKernel(float *dataIn, float *dataOut, float scale, int count) {
    // Writes dataOut[i] = dataIn[i] * scale for every i < count.
    //
    // Bug fix: the original stored the scaled value back into dataIn and never
    // wrote dataOut, so the caller's copy out of the output buffer read
    // uninitialized device memory.
    unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= count)
        return;
    dataOut[idx] = dataIn[idx] * scale;
}
int main(void)
{
    // Fills a host array with deterministic uniform floats, scales it by 2 on
    // the GPU, and copies the result back.
    int size = 100000;
    float *hostVal = new float[size];
    float *devA;
    float *devB;
    std::default_random_engine generator(1312);
    std::uniform_real_distribution<float> distribution(0.0,10);
    for (int i = 0; i < size; ++i) {
        hostVal[i] = distribution(generator);
    }
    cudaMalloc(&devA, size*sizeof(float));
    cudaMalloc(&devB, size*sizeof(float));
    cudaMemcpy(devA, hostVal, size*sizeof(float), cudaMemcpyDefault);
    int threadsPerBlock = 256;
    int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock; // ceil-div
    scaleKernel<<<blocksPerGrid, threadsPerBlock>>>(devA, devB, 2, size);
    // Fix: surface launch-configuration errors instead of failing silently.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
    // Blocking copy; also synchronizes with the kernel.
    cudaMemcpy(hostVal, devB, size*sizeof(float), cudaMemcpyDefault);
    // Fix: the original leaked both device buffers and the host array.
    cudaFree(devA);
    cudaFree(devB);
    delete[] hostVal;
    return 0;
}
|
19,152 | // #include <cuda_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <string.h>
#define THREADS 10
#define ROOM_SIZE 10
#define BLOCKS (ROOM_SIZE * ROOM_SIZE + THREADS - 1) / THREADS
#define ITERATION_LIMIT 100
// Jacobi-style heat relaxation: one thread per cell, ITERATION_LIMIT sweeps.
// Border cells keep their value; each interior cell is replaced by the
// average of its four neighbours every sweep.
__global__ void simulate_room(float *H) {
int index = threadIdx.x + blockIdx.x * THREADS;
// recover 2D coordinates (row i, column j) from the flat index
int j = index % ROOM_SIZE;
int i = index / ROOM_SIZE;
float tmp = H[index];
for(int it = 0 ; it < ITERATION_LIMIT ; it++) {
if(i > 0 && i < ROOM_SIZE - 1 && j > 0 && j < ROOM_SIZE - 1)
tmp = 0.25 * (H[(i - 1) * ROOM_SIZE + j] + H[(i + 1) * ROOM_SIZE + j] + H[i * ROOM_SIZE + j + 1] + H[i * ROOM_SIZE + j - 1]);
// NOTE(review): __syncthreads() only synchronizes threads within one block,
// but this kernel is launched with BLOCKS > 1, so reads/writes of H from
// different blocks can race between sweeps -- confirm whether a single-block
// launch or a grid-wide barrier was intended.
__syncthreads();
H[index] = tmp;
__syncthreads();
}
}
int main(int argc, char* argv[]) {
    // Sets up a ROOM_SIZE x ROOM_SIZE heat grid (walls at 20, a heater strip
    // at 100 on the top row), runs the relaxation kernel, and prints the
    // resulting temperatures.
    float *h_H, *d_H;
    h_H = (float *)malloc(sizeof(float) * ROOM_SIZE * ROOM_SIZE);
    // interior starts at 0 degrees
    for(int i = 0 ; i < ROOM_SIZE ; i++) {
        for(int j = 0 ; j < ROOM_SIZE ; j++)
            h_H[i * ROOM_SIZE + j] = 0;
    }
    // all four walls held at 20 degrees
    for(int i = 0 ; i < ROOM_SIZE ; i++) {
        h_H[i * ROOM_SIZE + 0] = 20;
        h_H[i * ROOM_SIZE + ROOM_SIZE - 1] = 20;
        h_H[0 * ROOM_SIZE + i] = 20;
        h_H[ROOM_SIZE * (ROOM_SIZE - 1) + i] = 20;
    }
    // heater: middle 40% of the top row at 100 degrees
    for(int i = 3 * (ROOM_SIZE) / 10 ; i < 7 * ROOM_SIZE / 10 ; i++)
        h_H[i] = 100;
    cudaMalloc((void **) &d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE);
    cudaMemcpy(d_H, h_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, cudaMemcpyHostToDevice);
    printf("THREADS %d BLOCKS %d\n", THREADS, BLOCKS);
    struct timeval t1, t2;
    gettimeofday(&t1, 0);
    simulate_room<<<BLOCKS,THREADS>>>(d_H);
    cudaDeviceSynchronize();
    gettimeofday(&t2, 0);
    // Fix: include the seconds component; the original subtracted only
    // tv_usec, which is wrong (and can be negative) whenever the measured
    // interval crosses a second boundary.
    double time1 = (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_usec - t1.tv_usec);
    printf("Time for GPU: %.8f us \n", time1);
    cudaMemcpy(h_H, d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, cudaMemcpyDeviceToHost);
    for(int i = 0 ; i < ROOM_SIZE; i++) {
        for(int j = 0 ; j < ROOM_SIZE ; j++)
            printf("%.0f ", h_H[i * ROOM_SIZE + j]);
        printf("\n");
    }
    // Fix: the original leaked both the host and the device buffer.
    cudaFree(d_H);
    free(h_H);
    return 0;
}
19,153 | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h> //srand and rand
#include <math.h>
// Constant data declaration
#define WORKSIZE 1024 // define a default worksize for constant data
__device__ __constant__ int d_a_const[WORKSIZE];
__device__ __constant__ int d_b_const[WORKSIZE];
/*
Profile functions. Taken and modified from https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/
*/
// Times the host-to-device upload of both input vectors with CUDA events and
// prints the elapsed time plus effective bandwidth.  desc selects the target:
// "Shared" uploads into the plain device buffers d_a/d_b, "Constant" into the
// __constant__ symbols d_a_const/d_b_const (d_a/d_b are ignored then).
void profileCopiesHostToDevice(int *d_a,
int *h_a,
int *d_b,
int *h_b,
const unsigned int bytes,
const char *desc){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    if (strcmp(desc, "Shared") == 0) {
        cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    } else if (strcmp(desc, "Constant") == 0) {
        cudaMemcpyToSymbol(d_a_const, h_a, bytes, 0, cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(d_b_const, h_b, bytes, 0, cudaMemcpyHostToDevice);
    }
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("\nTransfers Host to Device Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n", elapsedMs, bytes * 1e-3 / elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Times the download of the four result vectors.  The copies are identical
// for both variants (all results live in ordinary device memory); desc only
// supplies the printed label and guards against unexpected values.
void profileCopiesDeviceToHost( int *h_c_add, int *d_c_add, int *h_c_sub, int *d_c_sub,
int *h_c_mult, int *d_c_mult, int *h_c_mod, int *d_c_mod,
const unsigned int bytes, const char *desc){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    if (strcmp(desc, "Shared") == 0 || strcmp(desc, "Constant") == 0) {
        cudaMemcpy(h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("\n%s transfers Device To Host Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n",desc,elapsedMs, bytes * 1e-3 / elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
/*
Arithmetic Functions Using shared Memory
*/
// Add Function
// Element-wise c = a + b, staging each per-thread sum in shared memory.
__global__ void add_shared(int *a, int *b, int *c, int n){
    extern __shared__ int res[];
    int id = blockIdx.x*blockDim.x+threadIdx.x; // global element index
    if (id < n)
        res[threadIdx.x] = a[id] + b[id];
    __syncthreads(); // barrier kept outside the divergent branch
    // Fix: write back to this thread's GLOBAL slot.  The original stored to
    // c[threadIdx.x], so every block overwrote c[0..blockDim.x-1] and no
    // element past the first block was ever produced; it also read
    // res[threadIdx.x] when id >= n (uninitialized shared memory).
    if (id < n)
        c[id] = res[threadIdx.x];
}
// subtract function
// Element-wise c = a - b, staged through shared memory.
__global__ void subtract_shared(int *a, int *b, int *c, int n){
    extern __shared__ int res[];
    int id = blockIdx.x*blockDim.x+threadIdx.x; // global element index
    if (id < n)
        res[threadIdx.x] = a[id] - b[id];
    __syncthreads(); // barrier kept outside the divergent branch
    // Fix: store to c[id]; the original wrote c[threadIdx.x], so blocks
    // clobbered each other and res was read uninitialized when id >= n.
    if (id < n)
        c[id] = res[threadIdx.x];
}
// multiply function
// Element-wise c = a * b, staged through shared memory.
__global__ void mult_shared(int *a, int *b, int *c, int n){
    extern __shared__ int res[];
    int id = blockIdx.x*blockDim.x+threadIdx.x; // global element index
    if (id < n)
        res[threadIdx.x] = a[id] * b[id];
    __syncthreads(); // barrier kept outside the divergent branch
    // Fix: store to c[id]; the original wrote c[threadIdx.x], so blocks
    // clobbered each other and res was read uninitialized when id >= n.
    if (id < n)
        c[id] = res[threadIdx.x];
}
// Moudulus function
// Element-wise c = a % b, staged through shared memory.
// NOTE(review): callers seed b with rand() % 4, which can be 0 -- modulo by
// zero is undefined on the device; confirm the intended input range.
__global__ void mod_shared(int *a, int *b, int *c, int n){
    extern __shared__ int res[];
    int id = blockIdx.x*blockDim.x+threadIdx.x; // global element index
    if (id < n)
        res[threadIdx.x] = a[id] % b[id];
    __syncthreads(); // barrier kept outside the divergent branch
    // Fix: store to c[id]; the original wrote c[threadIdx.x], so blocks
    // clobbered each other and res was read uninitialized when id >= n.
    if (id < n)
        c[id] = res[threadIdx.x];
}
/*
Arithmetic Functions Using Constant Memory
*/
// Add Function
// Element-wise sum of the two __constant__ input vectors into c.
__global__ void add_const( int *c, int n){
    const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id >= n)
        return; // grid-tail guard
    c[id] = d_a_const[id] + d_b_const[id];
}
// subtract function
// Element-wise difference of the two __constant__ input vectors into c.
__global__ void subtract_const(int *c, int n){
    const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id >= n)
        return; // grid-tail guard
    c[id] = d_a_const[id] - d_b_const[id];
}
// multiply function
// Element-wise product of the two __constant__ input vectors into c.
__global__ void mult_const(int *c, int n){
    const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id >= n)
        return; // grid-tail guard
    c[id] = d_a_const[id] * d_b_const[id];
}
// Moudulus function
// Element-wise remainder of the two __constant__ input vectors into c.
// NOTE(review): callers seed the b vector with rand() % 4, which can be 0;
// modulo by zero is undefined on the device -- confirm the input range.
__global__ void mod_const(int *c, int n){
    const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x;
    if (id >= n)
        return; // grid-tail guard
    c[id] = d_a_const[id] % d_b_const[id];
}
/*
Function calls to arithmetic functions using shared memory and timing
*/
// Launches the shared-memory add kernel and reports its elapsed time
// measured with a pair of CUDA events.
void perform_add_shared(int numBlocks, int totalThreads, int *d_a, int *d_b, int *d_c_add){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing Add function...");
    add_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_add, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the shared-memory subtract kernel and reports its elapsed time.
void perform_sub_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_sub){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing subtract function");
    subtract_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_sub, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the shared-memory multiply kernel and reports its elapsed time.
void perform_mult_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mult){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing mult function");
    mult_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mult, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the shared-memory modulus kernel and reports its elapsed time.
void perform_mod_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mod){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing mod function");
    mod_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mod, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Shared Memory Implementation function
void execute_arithmetic_sharedMem(int totalThreads, int numBlocks){
printf("\t\t*****Executing Arithmetic Functions Using Shared Memory*****\n");
// Host input vectors
int *h_a, *h_b;
//Host output vectors for different functions "h_c_func"
int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod;
// Device input vectors
int *d_a, *d_b;
//Device output vector
int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod;
// Size, in bytes, of each vector
const unsigned int bytes = totalThreads*sizeof(int);
// Allocate memory for each vector on host Pinned
cudaMallocHost((void**)&h_a, bytes);
cudaMallocHost((void**)&h_b, bytes);
cudaMallocHost((void**)&h_c_add, bytes);
cudaMallocHost((void**)&h_c_sub, bytes);
cudaMallocHost((void**)&h_c_mult, bytes);
cudaMallocHost((void**)&h_c_mod, bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c_add, bytes);
cudaMalloc(&d_c_sub, bytes);
cudaMalloc(&d_c_mult, bytes);
cudaMalloc(&d_c_mod, bytes);
//initialize the input vectors
for(int i = 0;i<totalThreads;i++){
//first array is 0 through number of threads
h_a[i] = i;
// second array is a random number between 0 and 3
h_b[i] = rand() % 4;
}
//printf the first 7 elements of input arrays
printf("Array 1: ");
for(int i = 0; i<7; i++){
printf("%d ", h_a[i]);
}
printf("\nArray 2: ");
for(int i = 0; i<7; i++){
printf("%d ", h_b[i]);
}
printf("\n\n");
//copy both input arrays from host to device and profile it (see profileCopiesHostToDevice)
profileCopiesHostToDevice(d_a, h_a, d_b, h_b, bytes, "Shared");
//Perform arithmetic functions
perform_add_shared(numBlocks, totalThreads, d_a, d_b, d_c_add);
perform_sub_shared(numBlocks, totalThreads, d_a, d_b, d_c_sub);
perform_mult_shared(numBlocks, totalThreads, d_a, d_b, d_c_mult);
perform_mod_shared(numBlocks, totalThreads, d_a, d_b, d_c_mod);
//copy the output arrays from device to host
profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Shared");
// printf the first 7 elements of the results
printf("Arithmetic Results: \n");
printf("Add: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_add[i]);
}
printf("\nSubtract: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_sub[i]);
}
printf("\nMultiply: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_mult[i]);
}
printf("\nMultiply: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_mod[i]);
}
printf("\n\n");
//free up space on our GPU
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c_add);
cudaFree(d_c_sub);
cudaFree(d_c_mult);
cudaFree(d_c_add);
//free up space on our CPU use cudaFreeHost since pinnned
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c_add);
cudaFreeHost(h_c_sub);
cudaFreeHost(h_c_mult);
cudaFreeHost(h_c_mod);
}
/*
Function calls to arithmetic functions using constant memory
*/
// Launches the constant-memory add kernel and reports its elapsed time.
// (The dynamic shared-memory launch argument is kept for parity with the
// shared-memory variant even though add_const declares no shared memory.)
void perform_add_const(int numBlocks, int totalThreads,int *d_c_add){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing Add function...");
    add_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_add, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the constant-memory subtract kernel and reports its elapsed time.
void perform_sub_const(int numBlocks, int totalThreads, int *d_c_sub){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing subtract function");
    subtract_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_sub, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the constant-memory multiply kernel and reports its elapsed time.
void perform_mult_const(int numBlocks, int totalThreads,int *d_c_mult){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing mult function");
    mult_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_mult, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Launches the constant-memory modulus kernel and reports its elapsed time.
void perform_mod_const(int numBlocks, int totalThreads, int *d_c_mod){
    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);
    cudaEventRecord(begin, 0);
    printf("	Performing mod function");
    mod_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>( d_c_mod, totalThreads);
    cudaDeviceSynchronize(); // wait so the stop event brackets the whole kernel
    cudaEventRecord(finish, 0);
    cudaEventSynchronize(finish);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, begin, finish);
    printf("	Elapsed Time: %f\n", elapsedMs);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);
}
// Constant Memory Implementation
void execute_arithmetic_constMem(int totalThreads, int numBlocks){
printf("\t\t*****Executing Arithmetic Functions Using Constant Memory*****\n");
// Host input vectors
int *h_a, *h_b;
//Host output vectors for different functions "h_c_func"
int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod;
// Device input vectors
int *d_a, *d_b;
//Device output vector
int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod;
// Size, in bytes, of each vector
const unsigned int bytes = totalThreads*sizeof(int);
// Allocate memory for each vector on host Pinned
cudaMallocHost((void**)&h_a, bytes);
cudaMallocHost((void**)&h_b, bytes);
cudaMallocHost((void**)&h_c_add, bytes);
cudaMallocHost((void**)&h_c_sub, bytes);
cudaMallocHost((void**)&h_c_mult, bytes);
cudaMallocHost((void**)&h_c_mod, bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c_add, bytes);
cudaMalloc(&d_c_sub, bytes);
cudaMalloc(&d_c_mult, bytes);
cudaMalloc(&d_c_mod, bytes);
//initialize the input vectors
for(int i = 0;i<totalThreads;i++){
//first array is 0 through number of threads
h_a[i] = i;
// second array is a random number between 0 and 3
h_b[i] = rand() % 4;
}
//printf the first 7 elements of input arrays
printf("Array 1: ");
for(int i = 0; i<7; i++){
printf("%d ", h_a[i]);
}
printf("\nArray 2: ");
for(int i = 0; i<7; i++){
printf("%d ", h_b[i]);
}
printf("\n\n");
//copy both input arrays from host to device using cudaMemcpyToSymbol() (see profileCopiesHostToDevice)
profileCopiesHostToDevice(d_a_const, h_a, d_b_const, h_b, bytes, "Constant");
//Perform arithmetic functions
perform_add_const(numBlocks, totalThreads, d_c_add);
perform_sub_const(numBlocks, totalThreads, d_c_sub);
perform_mult_const(numBlocks, totalThreads, d_c_mult);
perform_mod_const(numBlocks, totalThreads, d_c_mod);
//copy the output arrays from device to host using cudaMemcyFromSymbol()
profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Constant");
// printf the first 7 elements of the results
printf("Arithmetic Results: \n");
printf("Add: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_add[i]);
}
printf("\nSubtract: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_sub[i]);
}
printf("\nMultiply: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_mult[i]);
}
printf("\nMod: ");
for(int i = 0; i<7; i++){
printf("%d ", h_c_mod[i]);
}
printf("\n\n");
//free up space on our GPU
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c_add);
cudaFree(d_c_sub);
cudaFree(d_c_mult);
cudaFree(d_c_add);
//free up space on our CPU use cudaFreeHost since pinnned
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c_add);
cudaFreeHost(h_c_sub);
cudaFreeHost(h_c_mult);
cudaFreeHost(h_c_mod);
}
int main(int argc, char** argv)
{
    // Runs both memory-variant benchmarks.  Optional args: <totalThreads> <blockSize>.
    int totalThreads = (1 << 10);
    int blockSize = 256;
    // use the user's values only when both are supplied (argc == 3)
    if( argc > 2 && argc < 4){
        totalThreads = atoi(argv[1]);
        blockSize = atoi(argv[2]);
    }
    int numBlocks = totalThreads/blockSize;
    printf("\nUsing %d Threads and %d BlockSize\n",totalThreads, blockSize);
    // round the thread count up to a whole number of blocks
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,0);
    const unsigned int bytes = totalThreads*sizeof(int);
    printf("\nDevice: %s\n", prop.name);
    // Fix: the original printed "bytes * bytes / totalThreads", which is not a
    // megabyte count; report the actual per-vector transfer size.
    printf("Transfer size (MB): %f\n\n", bytes / (1024.0 * 1024.0));
    //Execute the shared-memory arithmetic
    execute_arithmetic_sharedMem(totalThreads, numBlocks);
    //Execute the constant-memory arithmetic
    execute_arithmetic_constMem(totalThreads, numBlocks);
    return 0;
}
|
19,154 | #include <iostream>
#include <stdio.h>
using namespace std;
// Enumerates every CUDA device and prints an illustrated explanation of the
// grid/block/thread launch hierarchy followed by the device's key limits and
// properties.
int main ( void )
{
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count );
for ( int i = 0; i < count; i++ )
{
cudaGetDeviceProperties( &prop, i );
// Banner: worked examples of how flat block numbering maps onto 3D
// grid coordinates and 3D thread coordinates within each block.
printf( "=======================================================================================================\n" );
printf( "========= 7D GPU of 6D GX [ 4D BX; 5D BY; 6D BZ ] of 4D B [ 1D THX; 2D THY; 3D THZ ] ==================\n" );
printf( "========= single Thread[] in 4D Block [ THX, THY, THZ ] acts as ALU ===================================\n" );
printf( "=======================================================================================================\n" );
printf( "Grid[ Blocks, Blocks, Blocks ] => gridKnot/kernel<<< Blocks, BlockThread >>>\n" );
printf( "Grid[ Blocks, Blocks, Blocks ] => gridKnot/kernel<<< Block, BlockThreads >>>\n" );
printf( "Grid[ 0,0,0 ] = Block0[ Thread[ 0, 0, 0 ], Thread[ 1, 0, 0 ], Thread[ 2, 0, 0 ], Thread[ 3, 0, 0 ]... ]\n" );
printf( "Grid[ 0,0,0 ] = Block0[ Thread[ 0, 0, 0 ], Thread[ 0, 1, 0 ], Thread[ 0, 2, 0 ], Thread[ 0, 3, 0 ]... ]\n" );
printf( "Grid[ 0,0,0 ] = Block0[ Thread[ 0, 0, 0 ], Thread[ 0, 0, 1 ], Thread[ 0, 0, 2 ], Thread[ 0, 0, 3 ]... ]\n" );
printf( "Grid[ 1,0,0 ] = Block1[ Thread[ 0, 0, 0 ], Thread[ 1, 0, 0 ], Thread[ 2, 0, 0 ], Thread[ 3, 0, 0 ]... ]\n" );
printf( "Grid[ 1,0,0 ] = Block1[ Thread[ 0, 0, 0 ], Thread[ 0, 1, 0 ], Thread[ 0, 2, 0 ], Thread[ 0, 3, 0 ]... ]\n" );
printf( "Grid[ 1,0,0 ] = Block1[ Thread[ 0, 0, 0 ], Thread[ 0, 0, 1 ], Thread[ 0, 0, 2 ], Thread[ 0, 0, 3 ]... ]\n" );
printf( "Grid[ 2,0,0 ] = Block2[ Thread[ 0, 0, 0 ], Thread[ 1, 0, 0 ], Thread[ 2, 0, 0 ], Thread[ 3, 0, 0 ]... ]\n" );
printf( "Grid[ 2,0,0 ] = Block2[ Thread[ 0, 0, 0 ], Thread[ 0, 1, 0 ], Thread[ 0, 2, 0 ], Thread[ 0, 3, 0 ]... ]\n" );
printf( "Grid[ 2,0,0 ] = Block2[ Thread[ 0, 0, 0 ], Thread[ 0, 0, 1 ], Thread[ 0, 0, 2 ], Thread[ 0, 0, 3 ]... ]\n" );
printf( "Grid[ 0,1,0 ] = Block3[ Thread[ 0, 0, 0 ], Thread[ 1, 0, 0 ], Thread[ 2, 0, 0 ], Thread[ 3, 0, 0 ]... ]\n" );
printf( "Grid[ 0,1,0 ] = Block3[ Thread[ 0, 0, 0 ], Thread[ 0, 1, 0 ], Thread[ 0, 2, 0 ], Thread[ 0, 3, 0 ]... ]\n" );
printf( "Grid[ 0,1,0 ] = Block3[ Thread[ 0, 0, 0 ], Thread[ 0, 0, 1 ], Thread[ 0, 0, 2 ], Thread[ 0, 0, 3 ]... ]\n" );
printf( "===============================================================================\n" );
// Device limits and properties queried from cudaDeviceProp.
printf( "Max. Blocks per Grid: 2^31 - 1\n" );
printf( "Max. Threads per Block: %d\n", prop.maxThreadsPerBlock );
printf( "Max. No. of Thread Dimensions: [ %d, %d, %d ]\n", prop.maxThreadsDim[ 0 ], prop.maxThreadsDim[ 1 ], prop.maxThreadsDim[ 2 ] );
printf( "Max. No. of Grid Dimensions: [ %d, %d, %d ]; including max. [ %d ] 1D-3D threads per block \n",
prop.maxGridSize[ 0 ], prop.maxGridSize[ 1 ], prop.maxGridSize[ 2 ], prop.maxThreadsPerBlock );
printf( "Global memory size: %.2f[GB]\n", ( float )prop.totalGlobalMem / ( 1024.0f * 1024.0f * 1024.0f ) );
printf( "Global memory bus width: %i\n", ( int )prop.memoryBusWidth );
printf( "Memory freq.: %.2f[MHz]\n", ( float )prop.memoryClockRate / ( 1000.0f ) );
printf( "CUDA ver.: %d.%d\n", prop.major, prop.minor );
printf( "Integrated GPU.: %d\n", prop.integrated );
printf( "Concurrency: %d\n", prop.concurrentKernels );
printf( "Stream Multiprocesors SMM.: %d\n", prop.multiProcessorCount );
}
return 0;
}
|
19,155 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define NN 200
// Applies NN rounds of a fixed arithmetic sequence to y[idx], where idx is
// this thread's global element.  Each round repeats the pattern
// (y += x; y *= 2; y += idx; y += 3) three times.
__global__ void add_cuda_good(int *x,int *y)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (int round = 0; round < NN; round++) {
        for (int rep = 0; rep < 3; rep++) {
            y[idx] += x[idx];
            y[idx] *= 2;
            y[idx] += idx;
            y[idx] += 3;
        }
    }
}
// CPU reference for add_cuda_good: applies NN rounds of the same
// (y += x; y *= 2; y += index; y += 3) x3 sequence to every element.
//
// Bug fix: the original inner loop redeclared `i`, shadowing the element
// index, so it repeatedly reprocessed only elements 0..NN-1 regardless of
// `size` and never mirrored the per-element work of the GPU kernel.
void add_cpu_bad(int *x ,int *y, int size)
{
    for (int i = 0; i < size; i++) {
        for (int it = 0; it < NN; it++) {
            for (int rep = 0; rep < 3; rep++) {
                y[i] += x[i];
                y[i] *= 2;
                y[i] += i;
                y[i] += 3;
            }
        }
    }
}
// Prints `size` ints from arr on one line, each wrapped in colons, preceded
// by an optional --text-- header (just a newline when text is NULL).
void print_1D_arr(const char *text,int arr[], int size)
{
    if (text != NULL)
        printf("--%s--\n", text);
    else
        printf("\n");
    for (int k = 0; k < size; k++)
        printf(":%d:", arr[k]);
    printf("\n");
}
// Difference timeA - timeB in nanoseconds.
// Fix: widen tv_sec to 64 bits BEFORE scaling; on platforms with a 32-bit
// time_t the original multiplication could overflow before being widened.
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
    const int64_t nsPerSec = 1000000000LL;
    int64_t a = (int64_t)timeA_p->tv_sec * nsPerSec + timeA_p->tv_nsec;
    int64_t b = (int64_t)timeB_p->tv_sec * nsPerSec + timeB_p->tv_nsec;
    return a - b;
}
// Whole-second difference timeA - timeB (the nanosecond fields are ignored).
int64_t timeDiffSec(struct timespec *timeA_p, struct timespec *timeB_p)
{
    const int64_t secondsA = timeA_p->tv_sec;
    const int64_t secondsB = timeB_p->tv_sec;
    return secondsA - secondsB;
}
// Resets the two N-element benchmark arrays to their fixed start values:
// every x element becomes 10 and every y element becomes 2.
void arr_init(int *x,int *y, int N)
{
    for (int k = 0; k < N; ++k) {
        x[k] = 10;
        y[k] = 2;
    }
}
int main(int argc, char** argv)
{
    // Benchmarks the same arithmetic workload on GPU (add_cuda_good) and CPU
    // (add_cpu_bad) over N ints in unified memory, with T threads per block.
    struct timespec start, end;
    int *x,*y;
    int64_t timeElapsedGPU;
    int64_t timeElapsedCPU;
    int N,T;
    // Fix: the original read argv[1]/argv[2] without checking argc or the
    // sscanf results.
    if (argc < 3 || sscanf(argv[1], "%d", &N) != 1 || sscanf(argv[2], "%d", &T) != 1 || N <= 0 || T <= 0) {
        printf("usage: %s <N> <threads-per-block>\n", argv[0]);
        return 1;
    }
    clock_gettime(CLOCK_MONOTONIC, &start);
    // Allocate Unified Memory - accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(int));
    cudaMallocManaged(&y, N*sizeof(int));
    clock_gettime(CLOCK_MONOTONIC, &end);
    timeElapsedGPU = timespecDiff(&end, &start);
    // Fix: 64-bit values were printed with %d; use %lld with explicit casts.
    printf("\n\n\n timeElapsed for init = %lld\n", (long long)timeElapsedGPU);
    arr_init(x,y,N);
    int blockSize = T;
    int numBlocks = N/blockSize;
    // the kernel has no tail guard, so any remainder would simply be skipped
    if (N % blockSize != 0)
        printf("Warning: N is not a multiple of the block size; the last %d elements are not processed\n", N % blockSize);
    printf(" numBlocks=%d, blockSize=%d\n", numBlocks, blockSize);
    clock_gettime(CLOCK_MONOTONIC, &start);
    add_cuda_good<<<numBlocks, blockSize>>>( x, y);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &end);
    timeElapsedGPU = timespecDiff(&end, &start);
    printf("\n\n\n timeElapsed GPU = %lld\n", (long long)timeElapsedGPU);
    printf("\n Time Diff Sec GPU = %lld\n", (long long)timeDiffSec(&end,&start));
    arr_init(x,y,N);
    clock_gettime(CLOCK_MONOTONIC, &start);
    add_cpu_bad(x,y,N);
    clock_gettime(CLOCK_MONOTONIC, &end);
    timeElapsedCPU = timespecDiff(&end, &start);
    printf("\n\n\n timeElapsed CPU= %lld ratio:%f\n", (long long)timeElapsedCPU, (float)timeElapsedCPU/timeElapsedGPU);
    printf("\n Time Diff Sec CPU = %lld\n", (long long)timeDiffSec(&end,&start));
    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
19,156 | #include<cuda_runtime.h>
#include<stdio.h>
// Debug kernel: each thread prints its full thread/block coordinates plus the
// block and grid dimensions (device printf is serialized -- debugging only).
__global__ void checkIndex(void)
{
printf("threadIdx: (%d %d %d) blockIdx: (%d %d %d) blockDim: (%d %d %d) "
" gridDim: (%d %d %d)\n",threadIdx.x,threadIdx.y,threadIdx.z,blockIdx.x,blockIdx.y,blockIdx.z,blockDim.x,blockDim.y, blockDim.z,gridDim.x,gridDim.y,gridDim.z);
}
int main(int args, char **argv)
{
    // Cover n = 6 elements with 3-thread blocks, then report the launch
    // geometry from the host and from every device thread.
    int n = 6;
    dim3 block(3);
    dim3 grid((n+block.x-1)/block.x); // ceil-div
    // host-side view of the configuration
    printf("grid.x %d grid.y %d grid.x %d\n",grid.x,grid.y,grid.z);
    printf("block.x %d block.y %d block.z %d\n",block.x,block.y,block.z);
    // device-side view of the configuration
    checkIndex <<<grid, block>>> ();
    cudaDeviceReset(); // also flushes buffered device printf output before exit
    return 0;
}
|
// Divides every element of arr by d, one thread per element.
// extern "C" keeps the symbol unmangled (presumably for an FFI caller --
// confirm).
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover arr exactly (gridDim.x * blockDim.x == array length); a larger grid
// writes out of bounds -- confirm at the call site.
extern "C" __global__ void
staggered_sharp(float* arr, float d)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
arr[i] = arr[i] / d;
}
19,158 | /** Prints CUDA GPU information in a machine-readable user-friendly format.
*
* The output can be read with a YAML parser, and is an array with one element
* per CUDA GPU.
*
* Build with:
* nvcc -o cudainfo cudainfo.cu
*/
#include <stdio.h>
int main() {
  // Emits one YAML list entry per CUDA device, covering memory sizes, launch
  // limits, identity/PCI fields, capability booleans, and the compute mode.
  cudaDeviceProp props;
  cudaError_t err;
  int deviceCount;
  err = cudaGetDeviceCount(&deviceCount);
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    return 1;
  }
  printf("---\n");  // YAML document start
  for (int dev = 0; dev < deviceCount; ++dev) {
    err = cudaGetDeviceProperties(&props, dev);
    if (err != cudaSuccess) {
      fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
      return 1;
    }
    printf("- name: \"%s\"\n", props.name);
    printf("  compute_version: \"%d.%d\"\n",
           props.major, props.minor);
    printf("  pci_address: \"%02d:%02d\"\n",
           props.pciBusID, props.pciDeviceID);
    // memory / layout limits
    printf("  total_global_memory: %zu\n", props.totalGlobalMem);
    printf("  total_constant_memory: %zu\n", props.totalConstMem);
    printf("  shared_memory_per_block: %zu\n",
           props.sharedMemPerBlock);
    printf("  max_malloc_pitch: %zu\n", props.memPitch);
    printf("  texture_alignment: %zu\n", props.textureAlignment);
    printf("  registers_per_block: %d\n", props.regsPerBlock);
    printf("  max_threads_per_block: %d\n",
           props.maxThreadsPerBlock);
    printf("  max_thread_block_dimension: [%d, %d, %d]\n",
           props.maxThreadsDim[0], props.maxThreadsDim[1],
           props.maxThreadsDim[2]);
    printf("  max_grid_size: [%d, %d, %d]\n",
           props.maxGridSize[0], props.maxGridSize[1],
           props.maxGridSize[2]);
    printf("  warp_size_threads: %d\n", props.warpSize);
    printf("  multi_processor_count: %d\n",
           props.multiProcessorCount);
    printf("  clock_rate_khz: %d\n", props.clockRate);
    printf("  pci_bus_id: %d\n", props.pciBusID);
    printf("  pci_device_id: %d\n", props.pciDeviceID);
    printf("  compute_major: %d\n", props.major);
    printf("  compute_minor: %d\n", props.minor);
    // capability flags as YAML booleans
    printf("  integrated: %s\n",
           props.integrated ? "true" : "false");
    printf("  supports_device_overlap: %s\n",
           props.deviceOverlap ? "true" : "false");
    printf("  kernel_execution_timeout_enabled: %s\n",
           props.kernelExecTimeoutEnabled ? "true" : "false");
    printf("  can_map_host_memory: %s\n",
           props.canMapHostMemory ? "true" : "false");
    printf("  supports_concurrent_kernels: %s\n",
           props.concurrentKernels ? "true" : "false");
    printf("  ecc_enabled: %s\n",
           props.ECCEnabled ? "true" : "false");
    printf("  using_tcc_driver: %s\n",
           props.tccDriver ? "true" : "false");
    // translate the compute-mode enum; unrecognized values print "unknown"
    const char* computeMode = "unknown";
    if (props.computeMode == cudaComputeModeDefault)
      computeMode = "default";
    else if (props.computeMode == cudaComputeModeExclusive)
      computeMode = "exclusive";
    else if (props.computeMode == cudaComputeModeProhibited)
      computeMode = "prohibited";
    printf("  compute_mode: %s\n", computeMode);
  }
  return 0;
}
|
19,159 | #include <stdio.h>
// Prints one line per thread, numbered by the thread's global index.
__global__ void loop()
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    printf("This is iteration number %d\n", globalId);
}
int main()
{
    // 2 blocks x 5 threads = 10 total "iterations"; any other factorization
    // of 10 (e.g. <<<1,10>>> or <<<5,2>>>) would print the same index set.
    const int threads = 5;
    const int blocks = 2;
    loop<<<blocks, threads>>>();
    // wait so the device printf output is flushed before the program exits
    cudaDeviceSynchronize();
}
|
19,160 | #include <stdio.h>
#include <math.h>
long long res[64];
__global__ void fib(long long *res)
{
    // Closed-form (Binet) Fibonacci: thread t computes term t+1.
    // Exact only while the result fits in a double's 53-bit mantissa.
    int n = threadIdx.x;
    double s5 = sqrt(5.0);
    double phi = (1 + s5) / 2.0;   // golden ratio
    double psi = (1 - s5) / 2.0;   // conjugate root
    res[n] = (long long)(1.0/s5 * (pow(phi, n+1) - pow(psi, n+1)) + 0.5);
}
int main()
{
    // Read the number of Fibonacci terms. The host buffer res[] holds at
    // most 64 entries, so reject malformed or out-of-range input instead
    // of overflowing it (the original did no validation of scanf or n).
    int n;
    if (scanf("%d", &n) != 1 || n < 1 || n > 64)
    {
        fprintf(stderr, "expected a term count between 1 and 64\n");
        return 1;
    }
    long long *gpures;
    cudaMalloc(&gpures, n*sizeof(long long));
    // One block, one thread per term.
    fib<<<1,n>>>(gpures);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(res, gpures, n*sizeof(long long), cudaMemcpyDeviceToHost);
    cudaFree(gpures);
    for (int i = 0; i < n; ++i)
    {
        printf(i == n-1 ? "%lld\n" : "%lld ", res[i]);
    }
    return 0;
}
19,161 | /*--
--*/
#include<stdio.h>
#include "../include/Initializer.cuh"
void Initialize_state(float *state){
    // Copy the compile-time initial state vector into the working buffer.
    int i = 0;
    while (i < DIM_X) {
        state[i] = initial_state[i];
        ++i;
    }
}
void Initialize_input(float *input){
    // Copy the compile-time initial input vector into the working buffer.
    int i = 0;
    while (i < DIM_U) {
        input[i] = initial_input[i];
        ++i;
    }
}
void Initialize_diff_state(float *diff_st){
    // Copy the compile-time initial state derivative into the buffer.
    int i = 0;
    while (i < DIM_X) {
        diff_st[i] = initial_diff_state[i];
        ++i;
    }
}
int countBlocks(int a, const int b) {
    // Ceiling of a/b for positive a, with the original's quirk that
    // a == 0 also yields 1 (the a < b test triggers the increment);
    // preserved intentionally.
    int blocks = a / b;
    if (a < b || a % b != 0)
        ++blocks;
    return blocks;
}
void Initialize(float *state, float *input, float *diff_st, SpecGPU &a, ControllerParams &get_param, DataMessanger *hst, InputSequences *inp_sq){
    // Populate the initial state/input vectors and every controller
    // configuration structure from the compile-time constants.
    Initialize_state(state);
    Initialize_input(input);
    Initialize_diff_state(diff_st);
    // Sampling / solver dimensions.
    a.NUM_SAMPLES = NUM_OF_BLOCKS * NUM_OF_THREAD_PER_BLOCKS;
    a.NUM_BLOCKS = NUM_OF_BLOCKS;
    a.TH_PER_BLS = NUM_OF_THREAD_PER_BLOCKS;
    a.NUM_PRED_STEPS = NUM_OF_HORIZON;
    a.ITERATIONS = NUM_OF_ITERATIONS;
    a.NUM_SEEDS = countBlocks(NUM_OF_BLOCKS * NUM_OF_THREAD_PER_BLOCKS * NUM_OF_HORIZON, NUM_OF_THREAD_PER_BLOCKS);
    a.RATE_OF_CYCLE = CONTROL_CYCLE;
    a.LAMBDA = Clambda;
    // Per-input sampling variance and its annealing rate.
    for (int u = 0; u < DIM_U; ++u) {
        a.INIT_VARIANCE[u] = INITIAL_SIGMA[u];
        a.COOLING_RATES[u] = RATE_OF_COOLING[u];
    }
    // Plant parameters, cost weights and constraint bounds.
    for (int p = 0; p < NUM_OF_SYS_PARAMETERS; ++p)
        get_param.d_param[p] = system_params[p];
    for (int q = 0; q < DIM_Q; ++q)
        get_param.d_Q[q] = Q[q];
    for (int r = 0; r < DIM_R; ++r)
        get_param.d_R[r] = R[r];
    for (int c = 0; c < NUM_OF_I_CONSTRAINT; ++c)
        get_param.d_I_constraint[c] = constraint_for_input[c];
    for (int c = 0; c < NUM_OF_S_CONSTRAINT; ++c)
        get_param.d_S_constraint[c] = constraint_for_state[c];
    // Seed every block's messenger with zero costs and the initial input
    // held constant across the whole prediction horizon.
    DataMessanger seed;
    seed.L = 0.0f;
    seed.W = 0.0f;
    seed.IT_L = 0.0f;
    for (int h = 0; h < NUM_OF_HORIZON; ++h) {
        for (int u = 0; u < DIM_U; ++u) {
            seed.u[u][h] = initial_input[u];
            inp_sq[u].u[h] = initial_input[u];
        }
    }
    for (int blk = 0; blk < NUM_OF_BLOCKS; ++blk)
        hst[blk] = seed;
}
|
19,162 | #include <stdio.h>
void __global__ kernel_matrix_sum(int *A, int *B, int *C, const int nx, const int ny) {
    // One thread per element of an nx-by-ny matrix (2D launch); guard
    // the grid overhang first.
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix >= nx || iy >= ny) return;
    // Linear index: the x coordinate strides by ny.
    const int idx = ix * ny + iy;
    C[idx] = A[idx] + B[idx];
}
19,163 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <stdio.h>
extern "C"
cudaError_t cuda_main()
{
    // Generates 2^25 random ints on the host, sorts them on the device
    // with Thrust, copies them back, and prints progress markers.
    printf("stau\n");
    // generate bunch random numbers on the host
    thrust::host_vector<int> h_vec(1 << 25);
    thrust::generate(h_vec.begin(), h_vec.end(), rand);
    // %zu matches size_t and %d matches the int elements; the original
    // used %ld for both, which is undefined behavior for int arguments.
    printf("dua %zu %d %d\n", h_vec.size(), (int)h_vec[20], (int)h_vec[2000]);
    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;
    printf("tiga\n");
    // sort data on the device (805 Mkeys/sec on GeForce GTX 480)
    thrust::sort(d_vec.begin(), d_vec.end());
    printf("empat\n");
    // transfer data back to host
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    printf("lima %zu %d %d\n", h_vec.size(), (int)h_vec[20], (int)h_vec[2000]);
    return cudaGetLastError();
}
|
19,164 | #define N 1024
#include<stdio.h>
__global__ void add(int *a, int *b, int *c){
    // Single-block elementwise sum: thread t owns element t.
    const int t = threadIdx.x;
    c[t] = a[t] + b[t];
}
int main(){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // Device buffers for the three vectors.
    cudaMalloc((void **) &dev_a, N * sizeof(int));
    cudaMalloc((void **) &dev_b, N * sizeof(int));
    cudaMalloc((void **) &dev_c, N * sizeof(int));
    for(int j = 0; j < N; j++){
        a[j] = 2;
        b[j] = 3;
    }
    cudaMemcpy(dev_a, a, (N * sizeof(int)), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, (N * sizeof(int)), cudaMemcpyHostToDevice);
    // Launch one block of N threads: the kernel indexes with threadIdx.x
    // only, so everything must fit in a single block. Using N here
    // (instead of the hard-coded 1024) keeps the launch consistent with
    // the macro; N must stay <= 1024, the per-block thread limit.
    add<<<1, N>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, (N * sizeof(int)), cudaMemcpyDeviceToHost);
    // Print the first six results (2 + 3 = 5 expected).
    for(int j = 0; j <= 5; j++)
        printf("\n%d",c[j]);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
19,165 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Prints one marker line per thread; used to observe stream ordering.
__global__ void kernel1()
{
printf("kernel #1\n");
}
// Second marker kernel, launched on a different stream than kernel1.
__global__ void kernel2()
{
printf("kernel #2\n");
}
int main(int argc,char **argv)
{
    printf("Testing multiple kernel launch and show in-order execution of two kernels \n");
    int nThreadsPerBlock = 32;
    int blocks = 128;
    // Two non-default streams so the two launches may run concurrently.
    cudaStream_t s1;
    cudaStream_t s2;
    cudaStreamCreate(&s1);
    cudaStreamCreate(&s2);
    kernel1<<<blocks,nThreadsPerBlock,0,s1>>> ();
    // Kernel launches return no status; query the launch error explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess ) printf("cuda function failure at line %d :%s \n",__LINE__,cudaGetErrorString(err));
    // NOTE: the third launch parameter requests 1 byte of dynamic shared
    // memory — harmless, but almost certainly meant to be 0.
    kernel2<<<blocks,nThreadsPerBlock,1,s2>>> ();
    err = cudaGetLastError();
    if (err != cudaSuccess ) printf("cuda function failure at line %d :%s \n",__LINE__,cudaGetErrorString(err));
    cudaDeviceSynchronize();
    // The streams were leaked in the original: release them before exit.
    cudaStreamDestroy(s1);
    cudaStreamDestroy(s2);
    return 0 ;
}
|
19,166 | #include <stdio.h>
int main()
{
    // Demonstrates catastrophic float precision loss: after one step the
    // position is ~1e12, where the spacing between adjacent floats far
    // exceeds L = 1e-4, so the wrap-around y - floor(y/L)*L is meaningless.
    float y = 0;
    float vy = 10000000000000;
    float dt = 0.1;
    float L = 1e-4;
    y += vy * dt;
    printf("%f %f %f\n", y, floor(y/L)*L, y - floor(y/L)*L);
    y -= floor(y/L)*L;
}
|
19,167 | #include <stdio.h>
#include <iostream>
#include <time.h>
//#include <cutil_inline.h>
using namespace std;
//*****************************************//
//ֽ豸ϱ __global__ʶ
// Stage-1 π reduction kernel. Each thread accumulates a grid-strided
// share of the midpoint-rule terms 4/(1+x^2) over [0,1]; the block then
// tree-reduces its partial sums in dynamic shared memory, and thread 0
// writes one partial result per block into d_sum. Launch with
// blockDim.x*sizeof(T) bytes of dynamic shared memory; the tree
// reduction assumes blockDim.x is a power of two.
// NOTE(review): "extern T __shared__" with a template type is fragile —
// multiple instantiations of reducePI1 would declare the same extern
// shared symbol with different types; the usual fix is an extern char
// array reinterpreted as T*. Confirm only one instantiation is used.
template<typename T> __global__ void reducePI1(T* __restrict__ d_sum, int num){
int id = blockIdx.x*blockDim.x + threadIdx.x; // unique global thread id
T temp;
T pSum = 0;
extern T __shared__ s_pi[]; // per-block scratch for the tree reduction
T rnum = 1.0/num; // subinterval width
for(int i=id;i<num;i +=blockDim.x*gridDim.x){
// midpoint of subinterval i, then the integrand 4/(1+x^2)
temp = (i+0.5f)*rnum;
pSum += 4.0f/(1+temp*temp);
}
s_pi[threadIdx.x] = pSum*rnum; // scale by the width before reducing
__syncthreads(); // all partial sums visible block-wide
for(int i = (blockDim.x>>1);i >0;i >>= 1){
// pairwise tree reduction: halve the active range each pass
if (threadIdx.x<i){
s_pi[threadIdx.x] += s_pi[threadIdx.x+i];
}
__syncthreads();
}
// thread 0 publishes the block's partial sum for stage 2 (reducePI2)
if (threadIdx.x==0)
{
d_sum[blockIdx.x]=s_pi[0];
}
// (The original also carried a commented-out warp-unrolled variant that
// its author reported as producing wrong results; removed for clarity.)
}
// Stage-2 reduction: a single block loads the per-block partial sums
// produced by reducePI1 into shared memory, tree-reduces them, and
// writes the final value of π to *d_pi. Launch as <<<1, n, n*sizeof(T)>>>
// where n is the number of partials; the tree assumes n is a power of
// two. NOTE(review): the 'num' parameter is never used — blockDim.x is
// assumed to equal the number of partials.
template<typename T> __global__ void reducePI2(T* __restrict__ d_sum, int num, T* __restrict__ d_pi){
int id = threadIdx.x; // single block, so threadIdx.x is the global id
extern T __shared__ s_sum[]; // shared scratch, one slot per partial
s_sum[id]=d_sum[id]; // load the stage-1 partials
__syncthreads(); // wait until every slot is populated
for(int i = (blockDim.x>>1);i>0;i >>=1)
// pairwise tree reduction over shared memory
{
if (id<i){
s_sum[id] += s_sum[id+i];
}
__syncthreads();
}
// publish the final sum so the host can copy it back
if(threadIdx.x==0)
{
*d_pi =s_sum[0];
}
// (Commented-out warp-unrolled variant from the original removed.)
}
//**********************************************//
//´ϱ
// Host driver for the two-stage GPU π computation: stage 1 reduces num
// midpoint-rule terms into 'grid' per-block partials; stage 2 reduces
// those partials to a single value, which is copied back and returned.
// NOTE(review): no CUDA error checking — allocation or launch failures
// would silently return garbage.
template <typename T> T reducePI(int num){
int grid = 1024; // number of stage-1 blocks (and stage-2 threads)
T *tmp;
cudaMalloc((void**)&tmp,grid*sizeof(T)); // per-block partial sums
reducePI1<<<grid,256,256*sizeof(T)>>>(tmp,num); // stage 1: 256 threads/block, 256*sizeof(T) shared bytes
T *d_PI;
cudaMalloc((void**)&d_PI,sizeof(T)); // device slot for the final result
reducePI2<<<1,grid,grid*sizeof(T)>>>(tmp,grid,d_PI); // stage 2: one block of 'grid' threads
T pi; // host-side result
cudaMemcpy(&pi,d_PI,sizeof(T),cudaMemcpyDeviceToHost); // blocking copy also synchronizes
cudaFree(tmp); // release device scratch
cudaFree(d_PI);
return pi;
}
// CPU reference: midpoint-rule quadrature of ∫₀¹ 4/(1+x²) dx = π
// using num subintervals.
template <typename T> T cpuPI(int num){
    T acc = 0.0f;
    for (int k = 0; k < num; ++k)
    {
        // midpoint of the k-th subinterval, then the integrand there
        T mid = (k+0.5f)/num;
        acc += 4/(1+mid*mid);
    }
    return acc/num;
}
// Times the CPU and GPU π computations over 10^6 terms and prints both
// results and elapsed times. clock() measures CPU time, so the GPU
// figure includes host-side launch/copy overhead only as observed by
// the CPU.
int main(){
printf("test for compell \n");
clock_t start, finish; // timing bookends
float costtime;
start = clock();
// serial CPU reference
printf("cpu pi is %f\n",cpuPI<float>(1000000));
finish = clock();
costtime = (float)(finish - start) / CLOCKS_PER_SEC; // seconds
printf("costtime of CPU is %f\n",costtime);
start = clock();
// two-stage GPU reduction
printf("gpu pi is %f\n",reducePI<float>(1000000));
finish = clock();
costtime = (float)(finish - start) / CLOCKS_PER_SEC;
printf("costtime of GPU is %f\n",costtime);
return 0;
}
|
19,168 | //pass
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#define N 2//32
__global__ void kernel(uint4 *out) {
    // Each thread stores the constant vector (1,1,1,1) into its own slot.
    uint4 ones;
    ones.x = ones.y = ones.z = ones.w = 1;
    out[threadIdx.x] = ones;
}
|
19,169 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/generate.h> // Allow for the
#include <thrust/reduce.h> // Include the reduce operation
#include <thrust/fill.h> // Include the fill operation
#include <thrust/functional.h> // Include the modulus operation
#include <thrust/adjacent_difference.h>
#include <time.h>
#include <iostream>
// Bibliography
// [1] http://docs.nvidia.com/cuda/thrust/index.html
// Adds the sum of the differences between random numbers
// Generates 32 << offset random unsigned ints, computes the adjacent
// differences on the device, reduces them to a single sum, and prints
// the result together with the elapsed GPU time.
void count_adj_diffs(const unsigned int offset)
{
    // Compute the size of the data
    int data_size = 32 << offset;
    // Create events (for timing)
    cudaEvent_t adjDiffStart;
    cudaEvent_t adjDiffStop;
    cudaEventCreate(&adjDiffStart);
    cudaEventCreate(&adjDiffStop);
    // Record the current time
    cudaEventRecord(adjDiffStart);
    // Create the host data and results vector (they are not data dependent)
    thrust::host_vector<unsigned int> host_data(data_size);
    thrust::device_vector<unsigned int> results(data_size);
    // Generate random data and place it in the host vector
    thrust::generate(host_data.begin(), host_data.end(), rand);
    // Create a device vector that copies the host data
    thrust::device_vector<unsigned int> device_data(host_data.begin(), host_data.end());
    // Take the adjacent differences between each element in the device vector
    thrust::adjacent_difference(device_data.begin(), device_data.end(), results.begin());
    // Add the differences together and obtain the result
    int result = thrust::reduce(results.begin(), results.end());
    // Stop recording
    cudaEventRecord(adjDiffStop, 0);
    cudaEventSynchronize(adjDiffStop);
    // Get the time duration
    float adjDiffTime;
    cudaEventElapsedTime(&adjDiffTime, adjDiffStart, adjDiffStop);
    // Display the results
    std::cout << "Number of random numbers: " << data_size;
    // (perform some formatting)
    if (data_size < 100000)
    {
        std::cout << "\t";
    }
    std::cout << "\tTotal difference size: " << result << "\tElapsed Time (ms): " << adjDiffTime << std::endl;
    // Release the timing events — they were leaked in the original
    // (this function runs 20 times per process).
    cudaEventDestroy(adjDiffStart);
    cudaEventDestroy(adjDiffStop);
}
// Perfom a count of odd random numbers 32 << offset numbers
// Generates 32 << offset random unsigned ints and counts how many are
// odd by reducing each element modulo 2 on the device; prints the count
// and the elapsed GPU time.
void add_odd_randoms(const unsigned int offset)
{
    // Compute the size of the data
    int data_size = 32 << offset;
    // Create events (for timing)
    cudaEvent_t randomStart;
    cudaEvent_t randomStop;
    cudaEventCreate(&randomStart);
    cudaEventCreate(&randomStop);
    // Record the current time
    cudaEventRecord(randomStart);
    // Create the host data vector
    thrust::host_vector<unsigned int> host_data(data_size);
    // Generate random data and place it in the host vector
    thrust::generate(host_data.begin(), host_data.end(), rand);
    // Create a device vector that copies the host data
    thrust::device_vector<unsigned int> device_data(host_data.begin(), host_data.end());
    // Create the vector that will contain the modulo value
    thrust::device_vector<unsigned int> modulo_data(data_size);
    // Fill the modulo vector with 2 (all data values will be modulo 2)
    thrust::fill(modulo_data.begin(), modulo_data.end(), 2);
    // Perform the modulo operation in place
    thrust::transform(device_data.begin(), device_data.end(), modulo_data.begin(), device_data.begin(), thrust::modulus<unsigned int>());
    // Add the number of 1's together (the number of odd numbers)
    int count = thrust::reduce(device_data.begin(), device_data.end());
    // Stop recording
    cudaEventRecord(randomStop, 0);
    cudaEventSynchronize(randomStop);
    // Get the time duration
    float randomTime;
    cudaEventElapsedTime(&randomTime, randomStart, randomStop);
    // Display the results
    std::cout << "Number of random numbers: " << data_size;
    // (perform some formatting)
    if (data_size < 100000)
    {
        std::cout << "\t";
    }
    std::cout << "\tNumber of odd numbers: " << count << "\tElapsed Time (ms): " << randomTime << std::endl;
    // Release the timing events — they were leaked in the original
    // (this function runs 20 times per process).
    cudaEventDestroy(randomStart);
    cudaEventDestroy(randomStop);
}
// The main entry point (inputs are ignored)
// Entry point: runs both benchmarks for data sizes 32<<0 .. 32<<19.
int main(int argc, char* argv[])
{
    // Seed stdlib rand so each process run sees different data.
    srand(time(NULL));
    for (unsigned int offset = 0; offset < 20; ++offset)
    {
        count_adj_diffs(offset);
        add_odd_randoms(offset);
    }
    return EXIT_SUCCESS;
}
19,170 | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    // Grid-wide flat index with a tail guard for the final block.
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements)
        return;
    C[i] = A[i] + B[i];
}
/**
* Host main routine
*/
int
main(void)
{
// 1) problem size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// 2) allocate host memory (NOTE(review): malloc results are not checked)
float *h_A, *h_B, *h_C;
h_A = h_B = h_C = NULL;
h_A = (float *)malloc(size);
h_B = (float *)malloc(size);
h_C = (float *)malloc(size);
// 3) fill host inputs with random values in [0,1]
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// 4) allocate device memory
float *d_A, *d_B, *d_C;
d_A = d_B = d_C = NULL;
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_C, size);
// 5) copy inputs host -> device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// 6) launch configuration: ceil-divide so the last partial block is covered
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
// 7) run the kernel on the GPU
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
// 8) copy the result device -> host (blocking copy also synchronizes)
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// 9) verify against the host-side sum
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// 10) release device and host memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
// 11) reset the CUDA device
cudaDeviceReset();
// 12) done
printf("Done\n");
return 0;
}
|
19,171 | /*
* 日期: 2019-1-24
* 时间: 14:42
* 姓名: 杨丰拓
*/
//******************************************************************************************************************//
//对大量数据进行归约操作(例如,最大,最小,求和等)可通过使用共享内存缩短归约操作的时间.
//本程序预设目标归约2^20~2^30个数据,核划分为一维网格一维线程块,通过三次归约操作求出最大值.
//实际可处理数据为0~2^26.
//本程序可以处理0~2^20个数据,但实际上相对与处理的数据来说代码略有多余.在处理0~2^10个数据时仅需调用一次核函数,
//在处理2^10~2^20个数据时仅需调用两次核函数.当需要处理的数据量达到2^26时,网格内x维度上有65536个块,而网格的x,
//y,z方向上的维度最大值是 65535 ,块的数量超出维度上限,所以会出现结果输出为零的情况.
//若需要处理更多的数据,那么网格的维度需要变为二维乃至三维才能正常处理.
//******************************************************************************************************************//
#include <climits>
#include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#define k1 1024
#define k2 1024
//检查宏,用于检测cuda编程出错位置及原因
#define CHECK(call) \
{ \
const cudaError_t error=call; \
if(error!=cudaSuccess) \
{ \
printf("Error:%s:%d,",__FILE__,__LINE__); \
printf("code:%d,reason:%s\n",error,cudaGetErrorString(error)); \
exit(1); \
} \
} \
using namespace std;
/* @property 核函数
* @func 归约求每个共享内存内的最大值
* @param_in in 指向待归约的数据
* @param_in num 输入的数据量
* @param_out out 指向数据的输出地址
*/
/* Block-wise max reduction: each block loads blockDim.x elements of
 * 'in' into dynamic shared memory (blockDim.x*sizeof(int) bytes must be
 * passed at launch), tree-reduces them with max(), and writes one
 * result per block to out[blockIdx.x]. Assumes blockDim.x is a power
 * of two. */
__global__ void reduce(int *in,int *out,int num)
{
    int tid=threadIdx.x;                       // index within the block
    int idx=blockIdx.x*blockDim.x+threadIdx.x; // global element index
    extern __shared__ int data[];              // sized at launch time
    // Pad out-of-range lanes with INT_MIN (the identity for max)
    // instead of returning early: the original's "if(idx>=num)return;"
    // made some threads of a partially-filled block skip the
    // __syncthreads() below, which is undefined behavior, and also left
    // their shared-memory slots uninitialized.
    data[tid] = (idx < num) ? in[idx] : INT_MIN;
    __syncthreads();                           // all loads visible block-wide
    for(unsigned int s=blockDim.x/2;s>0;s>>=1) // pairwise tree reduction
    {
        if(tid<s)
        {
            data[tid]=max(data[tid],data[tid+s]);
        }
        __syncthreads();
    }
    if(tid==0)out[blockIdx.x]=data[0];         // publish the block maximum
}
// Host driver: reads an exponent a, builds 2^a ints (0..2^a-1), finds
// the maximum serially on the CPU, then in three reduce passes on the
// GPU (per-block maxima, then maxima of those, then a final block), and
// prints both results for comparison.
int main()
{
int a;
cout <<"数据量为2的几次方?(a<=25)"<<endl;
cin>>a;
int arraysize=1<<a;
int arraybytes=arraysize*sizeof(int);
int grid1,grid2;
int *h_in,*h_cpu,*h_gpu,*d_in,*d_out,*d_tmp1,*d_tmp2;
clock_t start,end;
double time;
h_in=(int *)malloc(arraybytes);
h_cpu=(int *)malloc(sizeof(int));
h_gpu=(int *)malloc(sizeof(int));
cout <<"数据量: "<<arraysize<<endl;
// generate the input data (0..arraysize-1, so the true max is arraysize-1)
for(int i=0;i<arraysize;i++)
{
//h_in[i]=(int)random();
h_in[i]=i;
}
// NOTE(review): *h_cpu starts at 0, so the CPU reference is only
// correct for non-negative data.
*h_cpu=0;
start=clock();
// serial CPU maximum for reference
for(int i=0;i<arraysize;i++)
{
*h_cpu=max(*h_cpu,h_in[i]);
}
end=clock();
time=end-start;
cout <<"cpu时间: "<<time/1000<<"ms"<<endl;
grid1=(arraysize-1)/k1+1; // blocks for pass 1 (1D grid)
cout <<"网格1大小:"<<grid1 <<endl;
grid2=(grid1-1)/k2+1; // blocks for pass 2 (1D grid)
cout <<"网格2大小:"<<grid2 <<endl;
cudaMalloc((void **)&d_in,arraybytes); // device input
cudaMalloc((void **)&d_tmp1,grid1*sizeof(int)); // pass-1 partials
cudaMalloc((void **)&d_tmp2,grid2*sizeof(int)); // pass-2 partials
cudaMalloc((void **)&d_out,sizeof(int)); // final result
CHECK(cudaMemcpy(d_in,h_in,arraybytes,cudaMemcpyHostToDevice));
// three reduce passes; each launch passes blockDim*sizeof(int) bytes
// of dynamic shared memory
reduce<<<grid1,k1,k1*sizeof(int)>>>(d_in,d_tmp1,arraysize);
// CHECK(cudaDeviceSynchronize()); // enable to surface kernel errors here
reduce<<<grid2,k2,k2*sizeof(int)>>>(d_tmp1,d_tmp2,grid1);
// CHECK(cudaDeviceSynchronize());
reduce<<<1,grid2,grid2*sizeof(int)>>>(d_tmp2,d_out,grid2);
// CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(h_gpu,d_out,sizeof(int),cudaMemcpyDeviceToHost));
cout <<"cpu归约结果:"<<*h_cpu<<endl;
cout << "gpu归约结果:"<<*h_gpu <<endl;
free(h_in); // release host memory
free(h_cpu);
free(h_gpu);
cudaFree(d_in); // release device memory
cudaFree(d_tmp1);
cudaFree(d_tmp2);
cudaFree(d_out);
return 0;
}
|
19,172 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
__global__ void vecAdd(float* A, float* B, float* C)
{
    // Global element index. There is no bounds guard, so the launch
    // must supply exactly one thread per element.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
#define cudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
// Aborts with the file/line of a failing CUDA call. 'file' is declared
// const because the cudaSafeCall macro passes the __FILE__ string
// literal, and ISO C++ (since C++11) forbids binding a string literal
// to a non-const char*.
void __cudaSafeCall(cudaError_t err, const char *file, int line)
{
    if ((err) != cudaSuccess)
    {
        fprintf(stderr, "CUDA error in file %s at line %i: %s.\n", file, line, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Benchmarks GPU vector addition: fills two N-element vectors, adds
// them with vecAdd, and reports the H2D + kernel + D2H time (measured
// with CUDA events) plus the first 10 results.
int main(int argc, char **argv)
{
int i, N = 720896; /* default vector size (1408 * 512) */
float *A, *devPtrA;
float *B, *devPtrB;
float *C, *devPtrC;
cudaEvent_t start, stop;
float rt;
/* check for user-supplied vector size */
if (argc > 1)
N = atoi(argv[1]);
printf("Running GPU vecAdd for %i elements\n", N);
/* allocate host memory */
A = (float*)malloc(N * sizeof(float));
B = (float*)malloc(N * sizeof(float));
C = (float*)malloc(N * sizeof(float));
/* generate random data such that A[i] + B[i] == RAND_MAX */
for (i = 0; i < N; i++)
{
A[i] = (float)random();
B[i] = (float)RAND_MAX - A[i];
}
/* ----- GPU add-on ------- */
cudaSafeCall( cudaMalloc((void**)&devPtrA, N * sizeof(float)) );
cudaSafeCall( cudaMalloc((void**)&devPtrB, N * sizeof(float)) );
cudaSafeCall( cudaMalloc((void**)&devPtrC, N * sizeof(float)) );
/* timing region covers copies and the kernel */
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaSafeCall( cudaMemcpy(devPtrA, A, N * sizeof(float), cudaMemcpyHostToDevice) );
cudaSafeCall( cudaMemcpy(devPtrB, B, N * sizeof(float), cudaMemcpyHostToDevice) );
/* call compute kernel */
/* NOTE(review): N/512 truncates — if a user-supplied N is not a
   multiple of 512 the tail elements are never computed (and vecAdd
   has no bounds guard). Confirm N is always a multiple of 512. */
vecAdd<<<N/512, 512>>>(devPtrA, devPtrB, devPtrC);
cudaSafeCall( cudaMemcpy(C, devPtrC, N * sizeof(float), cudaMemcpyDeviceToHost) );
/* timing */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&rt, start, stop); /* in milliseconds */
rt /= 1E3; /* convert to seconds */
printf("time=%.4f seconds, MFLOPS=%.1f\n", rt, (float)N/rt/1E6);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaSafeCall( cudaFree(devPtrA) );
cudaSafeCall( cudaFree(devPtrB) );
cudaSafeCall( cudaFree(devPtrC) );
/* ------------ */
/* print out first 10 results */
for (i = 0; i < 10; i++)
printf("C[%i]=%.2f\n", i, C[i]);
/* free allocated host memory */
free(A);
free(B);
free(C);
return EXIT_SUCCESS;
}
|
19,173 | #include "includes.h"
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) {
    // Naive single-block multiply: thread (tx,ty) computes P[ty][tx] as
    // the dot product of row ty of M with column tx of N.
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    float acc = 0;
    for (int k = 0; k < Width; ++k) {
        acc += Md[row*Width + k] * Nd[k*Width + col];
    }
    Pd[row*Width + col] = acc;
}
19,174 | #define W 500
#define H 500
#define D 500
#define TX 32
#define TY 32
#define TZ 32
// Ceiling division for non-negative a and positive b.
int divUp(int a, int b){ int q = a / b; if (a % b != 0) ++q; return q; }
__device__ float distance(int c,int r, int s ,float3 pos)
{
    // Euclidean distance from voxel (c,r,s) to pos.
    const float dx = c - pos.x;
    const float dy = r - pos.y;
    const float dz = s - pos.z;
    return sqrtf(dx*dx + dy*dy + dz*dz);
}
__global__
void distanceKernel(float *d_out, int w, int h, int d,float3 pos)
{
    // 3D launch: one thread per voxel, guarded against the grid overhang.
    const int c = blockIdx.x*blockDim.x+threadIdx.x;
    const int r = blockIdx.y*blockDim.y+threadIdx.y;
    const int s = blockIdx.z*blockDim.z+threadIdx.z;
    if (c >= w || r >= h || s >= d) return;
    // Linear index: x fastest, then y, then z.
    d_out[c + w*(r + h*s)] = distance(c,r,s,pos);
}
int main()
{
    // Host and device buffers for the W*H*D distance field.
    float *out=(float *)calloc(W*H*D, sizeof(float));
    float *d_out;
    cudaMalloc(&d_out, W*H*D*sizeof(float));
    const float3 pos={0.0f,0.0f,0.0f};
    // TX*TY*TZ = 32*32*32 = 32768 threads per block exceeds the hardware
    // limit of 1024 threads per block, so every launch with the macro
    // dimensions failed. Use an 8x8x8 block (512 threads) instead.
    const int bdim = 8;
    const dim3 blockSize(bdim, bdim, bdim);
    const dim3 gridSize(divUp(W, bdim), divUp(H, bdim), divUp(D, bdim));
    distanceKernel<<<gridSize,blockSize>>>(d_out, W,H,D,pos);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(out, d_out, W*H*D*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    free(out);
    return 0;
}
|
19,175 | #include "includes.h"
// Skeleton for a shared-memory convolution exercise: the body is
// intentionally unimplemented. The intended steps are:
//   1. declare shared memory sized to the thread block plus the border
//      (halo) required by the filter;
//   2. loop so the block cooperatively loads the needed input region
//      (including the halo) from global memory into shared memory;
//   3. __syncthreads() so all writes are visible block-wide;
//   4. compute the convolution from shared memory using 'filter';
//   5. store the result back to global memory through 'output'.
__global__ void convolution_kernel(float *output, float *input, float *filter) {
}
19,176 | #include "includes.h"
const int Nthreads = 1024, maxFR = 5000, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// For every time sample (grid-stride over NT) and every channel, scans
// the Nrank filter ranks and, within each rank, accumulates 'data' over
// up to Nsum near channels (neighbor lists in iC, NchanNear per
// channel). It tracks the maximum normalized energy Cf^2/(1+j) seen
// across ranks/prefix lengths, writing that maximum to datasum and the
// arg-max code (j + t*Nsum) to kkmax.
// NOTE(review): Params appears to pack solver dimensions by index
// (0=NT, 9=Nchan, 10=NchanNear, 13=Nsum, 14=Nrank) — confirm against
// the host-side caller; 'data' is presumably laid out as
// [NT x Nchan x Nrank] with time fastest, as the indexing implies.
__global__ void sumChannels(const double *Params, const float *data, float *datasum, int *kkmax, const int *iC){
int tid, tid0,t, kmax, i, bid, NT, Nchan, NchanNear,j,iChan, Nsum, Nrank;
float Cf, Cmax;
NchanNear = (int) Params[10];
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nchan = (int) Params[9];
Nsum = (int) Params[13];
Nrank = (int) Params[14];
// grid-stride loop over time samples
tid0 = tid + bid * blockDim.x;
while (tid0<NT){
for (i=0; i<Nchan;i++){
Cmax = 0.0f;
kmax = 0;
for (t=0;t<Nrank;t++){
Cf = 0.0f;
for(j=0; j<Nsum; j++){
// j-th nearest neighbor of channel i
iChan = iC[j+ NchanNear * i];
Cf += data[tid0 + NT * iChan + t * NT * Nchan];
// normalized energy of the running prefix sum; keep the best
if (Cf*Cf/(1+j) > Cmax){
Cmax = Cf*Cf /(1+j);
kmax = j + t*Nsum;
}
}
}
datasum[tid0 + NT * i] = Cmax;
kkmax[tid0 + NT * i] = kmax;
}
tid0 += blockDim.x * gridDim.x;
}
}
19,177 | /*
Group info:
hkhetaw Harsh Khetawat
asiddiq Anas Siddiqui
rkrish11 Rahul Krishna
*/
#include <math.h>
/* floating point precision type definitions */
typedef double FP_PREC;
// Returns the function value y(x) = x^2.
FP_PREC fn(FP_PREC x)
{
    FP_PREC y = x * x;
    return y;
}
|
19,178 | //
// Created by saleh on 10/8/18.
//
// Elementwise square root, one thread per element with a tail guard.
__global__ void kernel_sqrt_float(const float * __restrict__ g_idata, float * __restrict__ g_odata, unsigned long len){
    unsigned long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx<len){
        // sqrtf keeps the computation in single precision; the generic
        // sqrt spelling risks a double round-trip if the float overload
        // is not selected.
        g_odata[idx] = sqrtf(g_idata[idx]);
    }
}
// Host wrapper: launches kernel_sqrt_float over len elements with
// 256-thread blocks and a ceil-divided grid.
void sqrt_float(
        const float *g_idata,
        float *g_odata,
        unsigned long len){
    const unsigned long blocksize = 256;
    const unsigned long gridsize = (len + blocksize - 1) / blocksize;
    kernel_sqrt_float<<<gridsize,blocksize>>>(g_idata,g_odata,len);
}
19,179 | #include "includes.h"
// Computes p[row][col] as the dot product of row 'row' of m with row
// 'col' of n — i.e. p = m * n^T for row-major n (note n is indexed by
// [col*size + i], not [i*size + col]).
__global__ void matrixMult(int* m, int* n, int* p, int size)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the overhanging threads of the last blocks, and start the
    // accumulator at zero — the original read p_sum uninitialized,
    // producing garbage results.
    if (row >= size || col >= size) return;
    int p_sum = 0;
    for (int i = 0;i < size;i++) {
        p_sum += m[row * size + i] * n[col * size + i];
    }
    p[row * size + col] = p_sum;
}
19,180 | #include "includes.h"
__global__ void add(int n, float *x, float *y)
{
    // Grid-stride loop: y[i] += x[i] for every i < n, correct for any
    // launch configuration.
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step)
    {
        y[i] += x[i];
    }
}
19,181 | #include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <iostream>
// Deduplicates voxel ids in place on the device: sorts the
// (voxel_id, point_id) pairs by voxel id, then compacts consecutive
// duplicate voxel ids with unique_by_key (keeping, per Thrust
// semantics, the first point id of each run). Returns the number of
// unique voxel ids; both input arrays are mutated.
int unique_gpu_launcher(long long* input_voxel_ids_temp,
                        int* input_point_ids_temp,
                        int input_npoint) {
// key: voxel_id;
// value: point_ids.
thrust::sort_by_key(thrust::device, input_voxel_ids_temp, input_voxel_ids_temp+input_npoint, input_point_ids_temp);
thrust::pair<long long*,int*> new_end;
new_end = thrust::unique_by_key(thrust::device, input_voxel_ids_temp, input_voxel_ids_temp+input_npoint, input_point_ids_temp, thrust::equal_to<long long>());
// new_end.second points one past the last kept value, so the
// difference from the base pointer is the unique count.
int unique_count = new_end.second - input_point_ids_temp;
return unique_count;
}
19,182 | #include <stdio.h>
#include <stdlib.h>
#define N 600
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
    // One block per matrix element; the 2D block coordinates select it.
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
//int** randmatfunc();
// Fills the matrix with pseudo-random values in 1..100, echoing each
// row to stdout as it is generated, followed by a separator line.
void randmatfunc(int newmat[N][N]){
    for (int r = 0; r < N; r++){
        for (int c = 0; c < N; c++){
            int val = rand() % 100 + 1;
            printf("%d ", val);
            newmat[r][c] = val;
        }
        printf("\n");
    }
    printf("\n--------------------------------------\n");
}
// Generates two random N x N matrices on the host, adds them on the
// device with one block per element, and prints the result.
// NOTE(review): three 600x600 int arrays (~4.3 MB total) live on the
// stack here — this can overflow the default stack limit on some
// platforms; heap allocation would be safer.
int main(){
int A[N][N];
randmatfunc(A); // fill and print matrix A
int B[N][N];
randmatfunc(B); // fill and print matrix B
int C[N][N];
int (*d_A)[N], (*d_B)[N], (*d_C)[N]; // device matrices
cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
// NOTE(review): C is uninitialized here, so this copy is pointless —
// the kernel overwrites every element of d_C anyway.
cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
// one thread per block, one block per element
int numThreads = 1;
dim3 numBlocks(N,N);
MatAdd<<<numBlocks,numThreads>>>(d_A,d_B,d_C);
// blocking copy also synchronizes with the kernel
cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n");
return 0;
}
|
19,183 | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
const int VECTOR_SIZE = 1024;
__global__ void vector_add(int a[], int b[], int out[], size_t size) {
    // Single-block launch: threadIdx.x is the element index.
    const size_t idx = threadIdx.x;
    if (idx >= size) return;
    out[idx] = a[idx] + b[idx];
}
int main(int argc, char *argv[]) {
    // Unified-memory buffers, accessible from both host and device.
    int *a, *b, *out;
    cudaMallocManaged(&a, VECTOR_SIZE * sizeof(int));
    cudaMallocManaged(&b, VECTOR_SIZE * sizeof(int));
    cudaMallocManaged(&out, VECTOR_SIZE * sizeof(int));
    // a[i] = i and b[i] = i+1, so out[i] should become 2i+1.
    for (size_t i = 0; i < VECTOR_SIZE; ++i) {
        a[i] = (int)i;
        b[i] = (int)i + 1;
        out[i] = 0;
    }
    vector_add<<<1, VECTOR_SIZE>>>(a, b, out, VECTOR_SIZE);
    // The kernel runs asynchronously; wait before reading the results.
    cudaDeviceSynchronize();
    for (size_t i = 0; i < VECTOR_SIZE; ++i) {
        printf("out[%zd]: %d\n", i, out[i]);
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(out);
    return 0;
}
|
19,184 |
////////////////////////////////////////////////////////////////////////
// define kernel block size for
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 32
#define BLOCK_Y 8
// device code
// ADI setup kernel for a 3D NX x NY x NZ grid: for every node it writes
// the right-hand side d_du (a scaled 7-point Laplacian of d_u) and the
// tridiagonal coefficients (a,b,c) for the x, y and z sweeps. Boundary
// nodes get Dirichlet rows (du=0, a=c=0, b=1). Launched as a 2D grid of
// BLOCK_X x BLOCK_Y blocks covering the (i,j) plane; each thread walks
// all k-planes serially.
__global__ void GPU_adi_rhs(int NX, int NY, int NZ, float lam,
const float* __restrict__ d_u,
float* __restrict__ d_du,
float* __restrict__ d_ax,
float* __restrict__ d_bx,
float* __restrict__ d_cx,
float* __restrict__ d_ay,
float* __restrict__ d_by,
float* __restrict__ d_cy,
float* __restrict__ d_az,
float* __restrict__ d_bz,
float* __restrict__ d_cz)
{
int i, j, k, indg, active;
float du, a, b, c;
int NXM1 = NX - 1;
int NYM1 = NY - 1;
int NZM1 = NZ - 1;
// linear-index offsets for the +/-1 neighbors in each direction
#define IOFF 1
#define JOFF NX
#define KOFF NX*NY
//
// set up indices for main block
//
i = threadIdx.x + blockIdx.x*BLOCK_X;
j = threadIdx.y + blockIdx.y*BLOCK_Y;
indg = i + j*NX;
// threads past the domain edge stay idle for the whole k-loop
active = (i<NX) && (j<NY);
//
// loop over k-planes
//
for (k = 0; k<NZ; k++) {
//
// calculate r.h.s. and set a,b,c, coefficients
//
if (active) {
if (i == 0 || i == NXM1 || j == 0 || j == NYM1 || k == 0 || k == NZM1) {
du = 0.0f; // Dirichlet b.c.'s
a = 0.0f;
b = 1.0f;
c = 0.0f;
}
else {
// interior node: lam * discrete 7-point Laplacian of u
du = lam * (d_u[indg - IOFF] + d_u[indg + IOFF]
+ d_u[indg - JOFF] + d_u[indg + JOFF]
+ d_u[indg - KOFF] + d_u[indg + KOFF] - 6.0f*d_u[indg]);
a = -0.5f*lam;
b = 1.0f + lam;
c = -0.5f*lam;
}
d_du[indg] = du;
// identical (a,b,c) rows for all three sweep directions
d_ax[indg] = a;
d_bx[indg] = b;
d_cx[indg] = c;
d_ay[indg] = a;
d_by[indg] = b;
d_cy[indg] = c;
d_az[indg] = a;
d_bz[indg] = b;
d_cz[indg] = c;
// advance to the same (i,j) in the next k-plane; harmless to skip
// for inactive threads since they never dereference indg
indg += KOFF;
}
}
}
//
// Tri-diagonal (Thomas algorithm) solve along the x-direction, vectorized
// with float4 loads/stores: one thread owns one full x-line at (j,k) and
// processes 4 consecutive elements per global access.
// Assumptions (from the indexing below -- confirm at call site):
//   - NX is a multiple of 4 and the arrays are float4-aligned;
//   - NX <= 256, since the per-thread scratch arrays c2/d2 are indexed up
//     to NX-1 (they are sized 256).
// d_d holds the right-hand side on entry and the solution on exit.
//
__global__ void GPU_adi_x_float4(int NX, int NY, int NZ,
  const float4* __restrict__ d_a,
  const float4* __restrict__ d_b,
  const float4* __restrict__ d_c,
  float4* __restrict__ d_d) {
  int i, j, k, indg;
  // c2/d2 are the modified coefficients of the forward elimination,
  // kept in local (likely spilled-to-local) memory per thread.
  float aa, bb, cc, dd, c2[256], d2[256];
  float4 a4, b4, c4, d4;
  //
  // set up indices for main block
  //
  j = threadIdx.x + blockIdx.x*blockDim.x;
  k = threadIdx.y + blockIdx.y*blockDim.y;
  // float4 index of the first quad of this thread's x-line.
  indg = NX*(j + k*NY) / 4;
  if ((j<NY) && (k<NZ)) {
    //
    // forward pass (elimination), fully unrolled over the 4 lanes of a quad
    //
    a4 = d_a[indg];
    b4 = d_b[indg];
    c4 = d_c[indg];
    d4 = d_d[indg];
    // First row: no sub-diagonal term.
    bb = 1.0f / b4.x;
    cc = bb * c4.x;
    dd = bb * d4.x;
    c2[0] = cc;
    d2[0] = dd;
    aa = a4.y;
    bb = b4.y - aa*cc;
    dd = d4.y - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.y;
    dd = bb*dd;
    c2[1] = cc;
    d2[1] = dd;
    aa = a4.z;
    bb = b4.z - aa*cc;
    dd = d4.z - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.z;
    dd = bb*dd;
    c2[2] = cc;
    d2[2] = dd;
    aa = a4.w;
    bb = b4.w - aa*cc;
    dd = d4.w - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.w;
    dd = bb*dd;
    c2[3] = cc;
    d2[3] = dd;
    // Remaining quads of the line, one float4 load per array per step.
    for (i = 4; i<NX; i += 4) {
      indg = indg + 1;
      a4 = d_a[indg];
      b4 = d_b[indg];
      c4 = d_c[indg];
      d4 = d_d[indg];
      aa = a4.x;
      bb = b4.x - aa*cc;
      dd = d4.x - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.x;
      dd = bb*dd;
      c2[i] = cc;
      d2[i] = dd;
      aa = a4.y;
      bb = b4.y - aa*cc;
      dd = d4.y - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.y;
      dd = bb*dd;
      c2[i + 1] = cc;
      d2[i + 1] = dd;
      aa = a4.z;
      bb = b4.z - aa*cc;
      dd = d4.z - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.z;
      dd = bb*dd;
      c2[i + 2] = cc;
      d2[i + 2] = dd;
      aa = a4.w;
      bb = b4.w - aa*cc;
      dd = d4.w - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.w;
      dd = bb*dd;
      c2[i + 3] = cc;
      d2[i + 3] = dd;
    }
    //
    // reverse pass (back-substitution); dd currently holds x[NX-1]
    //
    d4.w = dd;
    dd = d2[NX - 2] - c2[NX - 2] * dd;
    d4.z = dd;
    dd = d2[NX - 3] - c2[NX - 3] * dd;
    d4.y = dd;
    dd = d2[NX - 4] - c2[NX - 4] * dd;
    d4.x = dd;
    d_d[indg] = d4;
    for (i = NX - 5; i >= 0; i -= 4) {
      indg = indg - 1;
      dd = d2[i] - c2[i] * dd;
      d4.w = dd;
      dd = d2[i - 1] - c2[i - 1] * dd;
      d4.z = dd;
      dd = d2[i - 2] - c2[i - 2] * dd;
      d4.y = dd;
      dd = d2[i - 3] - c2[i - 3] * dd;
      d4.x = dd;
      d_d[indg] = d4;
    }
  }
}
//
// Variant of GPU_adi_x_float4 processing 8 elements (two float4 quads) per
// step of the Thomas algorithm along x. One thread owns one x-line at (j,k).
// Assumptions (from the indexing -- confirm at call site): NX is a multiple
// of 8, arrays are float4-aligned, and NX <= 256 (c2/d2 capacity).
// d_d holds the right-hand side on entry and the solution on exit.
//
__global__ void GPU_adi_x_float4_2(int NX, int NY, int NZ,
  const float4* __restrict__ d_a,
  const float4* __restrict__ d_b,
  const float4* __restrict__ d_c,
  float4* __restrict__ d_d) {
  int i, j, k, indg;
  float aa, bb, cc, dd, c2[256], d2[256];
  float4 a4, b4, c4, d4, a4_2, b4_2, c4_2, d4_2;
  //
  // set up indices for main block
  //
  j = threadIdx.x + blockIdx.x*blockDim.x;
  k = threadIdx.y + blockIdx.y*blockDim.y;
  indg = NX*(j + k*NY) / 4;
  if ((j<NY) && (k<NZ)) {
    //
    // forward pass: first 8 elements, fully unrolled
    //
    a4 = d_a[indg];
    a4_2 = d_a[indg + 1];
    b4 = d_b[indg];
    b4_2 = d_b[indg + 1];
    c4 = d_c[indg];
    c4_2 = d_c[indg + 1];
    d4 = d_d[indg];
    d4_2 = d_d[indg + 1];
    bb = 1.0f / b4.x;
    cc = bb * c4.x;
    dd = bb * d4.x;
    c2[0] = cc;
    d2[0] = dd;
    aa = a4.y;
    bb = b4.y - aa*cc;
    dd = d4.y - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.y;
    dd = bb*dd;
    c2[1] = cc;
    d2[1] = dd;
    aa = a4.z;
    bb = b4.z - aa*cc;
    dd = d4.z - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.z;
    dd = bb*dd;
    c2[2] = cc;
    d2[2] = dd;
    aa = a4.w;
    bb = b4.w - aa*cc;
    dd = d4.w - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4.w;
    dd = bb*dd;
    c2[3] = cc;
    d2[3] = dd;
    aa = a4_2.x;
    bb = b4_2.x - aa*cc;
    dd = d4_2.x - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4_2.x;
    dd = bb*dd;
    c2[4] = cc;
    d2[4] = dd;
    aa = a4_2.y;
    bb = b4_2.y - aa*cc;
    dd = d4_2.y - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4_2.y;
    dd = bb*dd;
    c2[5] = cc;
    d2[5] = dd;
    aa = a4_2.z;
    bb = b4_2.z - aa*cc;
    dd = d4_2.z - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4_2.z;
    dd = bb*dd;
    c2[6] = cc;
    d2[6] = dd;
    aa = a4_2.w;
    bb = b4_2.w - aa*cc;
    dd = d4_2.w - aa*dd;
    bb = 1.0f / bb;
    cc = bb*c4_2.w;
    dd = bb*dd;
    c2[7] = cc;
    d2[7] = dd;
    // Remaining quads, two float4 loads per array per step.
    for (i = 8; i<NX; i += 8) {
      indg = indg + 2;
      a4 = d_a[indg];
      a4_2 = d_a[indg + 1];
      // NOTE(review): these block-scope fences do not change this thread's
      // own (purely local) computation; they appear intended as a
      // scheduling/grouping hint for the paired loads -- confirm intent.
      __threadfence_block();
      b4 = d_b[indg];
      b4_2 = d_b[indg + 1];
      __threadfence_block();
      c4 = d_c[indg];
      c4_2 = d_c[indg + 1];
      __threadfence_block();
      d4 = d_d[indg];
      d4_2 = d_d[indg + 1];
      aa = a4.x;
      bb = b4.x - aa*cc;
      dd = d4.x - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.x;
      dd = bb*dd;
      c2[i] = cc;
      d2[i] = dd;
      aa = a4.y;
      bb = b4.y - aa*cc;
      dd = d4.y - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.y;
      dd = bb*dd;
      c2[i + 1] = cc;
      d2[i + 1] = dd;
      aa = a4.z;
      bb = b4.z - aa*cc;
      dd = d4.z - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.z;
      dd = bb*dd;
      c2[i + 2] = cc;
      d2[i + 2] = dd;
      aa = a4.w;
      bb = b4.w - aa*cc;
      dd = d4.w - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4.w;
      dd = bb*dd;
      c2[i + 3] = cc;
      d2[i + 3] = dd;
      aa = a4_2.x;
      bb = b4_2.x - aa*cc;
      dd = d4_2.x - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4_2.x;
      dd = bb*dd;
      c2[i + 4] = cc;
      d2[i + 4] = dd;
      aa = a4_2.y;
      bb = b4_2.y - aa*cc;
      dd = d4_2.y - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4_2.y;
      dd = bb*dd;
      c2[i + 5] = cc;
      d2[i + 5] = dd;
      aa = a4_2.z;
      bb = b4_2.z - aa*cc;
      dd = d4_2.z - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4_2.z;
      dd = bb*dd;
      c2[i + 6] = cc;
      d2[i + 6] = dd;
      aa = a4_2.w;
      bb = b4_2.w - aa*cc;
      dd = d4_2.w - aa*dd;
      bb = 1.0f / bb;
      cc = bb*c4_2.w;
      dd = bb*dd;
      c2[i + 7] = cc;
      d2[i + 7] = dd;
    }
    //
    // reverse pass (back-substitution); dd currently holds x[NX-1]
    //
    d4_2.w = dd;
    dd = d2[NX - 2] - c2[NX - 2] * dd;
    d4_2.z = dd;
    dd = d2[NX - 3] - c2[NX - 3] * dd;
    d4_2.y = dd;
    dd = d2[NX - 4] - c2[NX - 4] * dd;
    d4_2.x = dd;
    dd = d2[NX - 5] - c2[NX - 5] * dd;
    d4.w = dd;
    dd = d2[NX - 6] - c2[NX - 6] * dd;
    d4.z = dd;
    dd = d2[NX - 7] - c2[NX - 7] * dd;
    d4.y = dd;
    dd = d2[NX - 8] - c2[NX - 8] * dd;
    d4.x = dd;
    d_d[indg + 1] = d4_2;
    d_d[indg] = d4;
    for (i = NX - 9; i >= 0; i -= 8) {
      indg = indg - 2;
      dd = d2[i] - c2[i] * dd;
      d4_2.w = dd;
      dd = d2[i - 1] - c2[i - 1] * dd;
      d4_2.z = dd;
      dd = d2[i - 2] - c2[i - 2] * dd;
      d4_2.y = dd;
      dd = d2[i - 3] - c2[i - 3] * dd;
      d4_2.x = dd;
      dd = d2[i - 4] - c2[i - 4] * dd;
      d4.w = dd;
      dd = d2[i - 5] - c2[i - 5] * dd;
      d4.z = dd;
      dd = d2[i - 6] - c2[i - 6] * dd;
      d4.y = dd;
      dd = d2[i - 7] - c2[i - 7] * dd;
      d4.x = dd;
      d_d[indg + 1] = d4_2;
      d_d[indg] = d4;
    }
  }
}
//
// new tri-diagonal solve in x-direction
//
//
// new tri-diagonal solve in x-direction
//
// Parallel reduction-style tri-diagonal solve: one thread block per x-line
// (blockIdx = (j,k)), one thread per element. The system rows are first
// normalised to  -a[t]*x[t-1] + x[t] - c[t]*x[t+1] = d[t], then repeatedly
// combined in a tree (log2(blockDim.x) levels), followed by a mirrored
// back-substitution sweep.
// Assumptions (from the shared-array sizes and indexing -- confirm at the
// call site): blockDim.x == NX, NX a power of two, NX <= 256.
//
__global__ void GPU_adi_x_new(int NX, int NY, int NZ,
  const float* __restrict__ d_a,
  const float* __restrict__ d_b,
  const float* __restrict__ d_c,
  float* __restrict__ d_d)
{
  int j, k, indg, t, tm, tp, nt, shift = 0;
  float bbi;
  __shared__ float a[256], c[256], d[256];
  //
  // set up indices for main block
  //
  t = threadIdx.x;
  j = blockIdx.x;
  k = blockIdx.y;
  indg = t + NX*(j + k*NY);
  // Normalise the row by its diagonal; note the sign flip on a and c.
  bbi = 1.0f / d_b[indg];
  a[t] = -bbi * d_a[indg];
  c[t] = -bbi * d_c[indg];
  d[t] = bbi * d_d[indg];
  // forward pass: combine each odd-indexed row with its two neighbours,
  // halving the number of active rows each level.
  tm = 2 * t;
  t = tm + 1;
  tp = tm + 2;
  for (nt = blockDim.x / 2; nt>0; nt >>= 1) {
    shift++;
    // Barrier is outside the divergent branch: every thread reaches it.
    __syncthreads();
    if (threadIdx.x < nt) {
      bbi = 1.0f;
      if (tm >= 0) {
        bbi -= a[t] * c[tm];
        d[t] += a[t] * d[tm];
        a[t] = a[t] * a[tm];
      }
      if (tp<NX) {
        bbi -= c[t] * a[tp];
        d[t] += c[t] * d[tp];
        c[t] = c[t] * c[tp];
      }
      bbi = 1.0f / bbi;
      d[t] *= bbi;
      a[t] *= bbi;
      c[t] *= bbi;
      // Move to the corresponding indices of the next (coarser) level.
      tm = 2 * tm + 1;
      t = 2 * t + 1;
      tp = 2 * tp + 1;
    }
  }
  // reverse pass: substitute solved values back down the tree levels.
  for (; shift>0; shift--) {
    nt = blockDim.x >> shift;
    __syncthreads();
    if (threadIdx.x < nt) {
      tm >>= 1;
      t >>= 1;
      tp >>= 1;
      if (tm >= 0) d[tm] += c[tm] * d[t];
      if (tp<NX) d[tp] += a[tp] * d[t];
    }
  }
  __syncthreads();
  // indg still addresses this thread's original element.
  d_d[indg] = d[threadIdx.x];
}
//
// old tri-diagonal solve in x-direction
//
//
// old tri-diagonal solve in x-direction
//
// Sequential Thomas algorithm: one thread owns one full x-line at (j,k) and
// sweeps it forward (elimination) then backward (substitution).
// d_d holds the right-hand side on entry and the solution on exit.
// Assumes NX <= 256 (per-thread scratch arrays c/d are indexed up to NX-1).
//
__global__ void GPU_adi_x(int NX, int NY, int NZ,
  const float* __restrict__ d_a,
  const float* __restrict__ d_b,
  const float* __restrict__ d_c,
  float* __restrict__ d_d)
{
  int i, j, k, indg;
  // c/d cache the modified coefficients for the back-substitution sweep.
  float aa, bb, cc, dd, c[256], d[256];
  //
  // set up indices for main block
  //
  j = threadIdx.x + blockIdx.x*blockDim.x; // global indices
  k = threadIdx.y + blockIdx.y*blockDim.y;
  // Start of this thread's x-line (x is the unit-stride direction).
  indg = NX*(j + k*NY);
  if ((j<NY) && (k<NZ)) {
    //
    // forward pass
    //
    bb = 1.0f / d_b[indg];
    cc = bb*d_c[indg];
    dd = bb*d_d[indg];
    c[0] = cc;
    d[0] = dd;
    for (i = 1; i<NX; i++) {
      indg = indg + 1;
      aa = d_a[indg];
      bb = d_b[indg] - aa*cc;
      dd = d_d[indg] - aa*dd;
      bb = 1.0f / bb;
      cc = bb*d_c[indg];
      dd = bb*dd;
      c[i] = cc;
      d[i] = dd;
    }
    //
    // reverse pass; dd holds the solution at i = NX-1 on entry
    //
    d_d[indg] = dd;
    for (i = NX - 2; i >= 0; i--) {
      indg = indg - 1;
      dd = d[i] - c[i] * dd;
      d_d[indg] = dd;
    }
  }
}
//
// tri-diagonal solve in y-direction
//
//
// tri-diagonal solve in y-direction
//
// Thomas algorithm along y: one thread owns one y-line at (i,k), stepping
// through memory with stride NX. Because adjacent threads process adjacent
// i, the global accesses at each step are coalesced across the warp.
// d_d holds the right-hand side on entry and the solution on exit.
// Assumes NY <= 256 (per-thread scratch arrays c/d are indexed up to NY-1).
//
__global__ void GPU_adi_y(int NX, int NY, int NZ,
  const float* __restrict__ d_a,
  const float* __restrict__ d_b,
  const float* __restrict__ d_c,
  float* __restrict__ d_d)
{
  int i, j, k, indg;
  float aa, bb, cc, dd, c[256], d[256];
  //
  // set up indices for main block
  //
  i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
  k = threadIdx.y + blockIdx.y*blockDim.y;
  // First element (j = 0) of this thread's y-line.
  indg = i + k*NX*NY;
  if ((i<NX) && (k<NZ)) {
    //
    // forward pass
    //
    bb = 1.0f / d_b[indg];
    cc = bb*d_c[indg];
    dd = bb*d_d[indg];
    c[0] = cc;
    d[0] = dd;
    for (j = 1; j<NY; j++) {
      indg = indg + NX;
      aa = d_a[indg];
      bb = d_b[indg] - aa*cc;
      dd = d_d[indg] - aa*dd;
      bb = 1.0f / bb;
      cc = bb*d_c[indg];
      dd = bb*dd;
      c[j] = cc;
      d[j] = dd;
    }
    //
    // reverse pass
    //
    d_d[indg] = dd;
    for (j = NY - 2; j >= 0; j--) {
      indg = indg - NX;
      dd = d[j] - c[j] * dd;
      d_d[indg] = dd;
    }
  }
}
//
// tri-diagonal solve in z-direction, and update solution
//
//
// tri-diagonal solve in z-direction, and update solution
//
// Thomas algorithm along z (stride NX*NY per step); one thread owns one
// z-line at (i,j). Unlike the x/y sweeps, the result is accumulated into
// the solution: d_u += solve(d_a,d_b,d_c; d_d). d_d itself is left intact.
// Assumes NZ <= 256 (per-thread scratch arrays c/d are indexed up to NZ-1).
//
__global__ void GPU_adi_z(int NX, int NY, int NZ,
  float* __restrict__ d_u,
  const float* __restrict__ d_a,
  const float* __restrict__ d_b,
  const float* __restrict__ d_c,
  const float* __restrict__ d_d)
{
  int i, j, k, indg, off;
  float aa, bb, cc, dd, c[256], d[256];
  //
  // set up indices for main block
  //
  i = threadIdx.x + blockIdx.x*blockDim.x; // global indices
  j = threadIdx.y + blockIdx.y*blockDim.y;
  indg = i + j*NX;
  // Stride between consecutive k-planes.
  off = NX*NY;
  if ((i<NX) && (j<NY)) {
    //
    // forward pass
    //
    bb = 1.0f / d_b[indg];
    cc = bb*d_c[indg];
    dd = bb*d_d[indg];
    c[0] = cc;
    d[0] = dd;
    for (k = 1; k<NZ; k++) {
      indg = indg + off;
      aa = d_a[indg];
      bb = d_b[indg] - aa*cc;
      dd = d_d[indg] - aa*dd;
      bb = 1.0f / bb;
      cc = bb*d_c[indg];
      dd = bb*dd;
      c[k] = cc;
      d[k] = dd;
    }
    //
    // reverse pass: accumulate the increment into the solution field
    //
    d_u[indg] += dd;
    for (k = NZ - 2; k >= 0; k--) {
      indg = indg - off;
      dd = d[k] - c[k] * dd;
      d_u[indg] += dd;
    }
  }
}
|
19,185 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
// Event-based GPU timing helpers. Usage: declare `float t;`, then
// TIMER_CREATE(t); TIMER_START(t); ...work...; TIMER_END(t);  -- after
// which `t` holds the elapsed time in milliseconds.
#define TIMER_CREATE(t)           \
  cudaEvent_t t##_start, t##_end; \
  cudaEventCreate(&t##_start);    \
  cudaEventCreate(&t##_end);
// BUG FIX: TIMER_START previously ended with a stray line-continuation
// backslash, which spliced the following #define into its replacement list.
#define TIMER_START(t)            \
  cudaEventRecord(t##_start);     \
  cudaEventSynchronize(t##_start);
#define TIMER_END(t)                            \
  cudaEventRecord(t##_end);                     \
  cudaEventSynchronize(t##_end);                \
  cudaEventElapsedTime(&t, t##_start, t##_end); \
  cudaEventDestroy(t##_start);                  \
  cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
unsigned int *histogram;
unsigned int *cumhistogram;
unsigned int *SK;
double *PrRK;
double *alpha;
// Monotonic wall-clock time in milliseconds (not affected by NTP steps).
double CLOCK() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return ts.tv_sec * 1000.0 + ts.tv_nsec * 1e-6;
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through wrapper for CUDA runtime calls: returns `result` unchanged so
// calls can be wrapped inline. Only in DEBUG/_DEBUG builds does it print the
// error string and abort on failure; release builds silently ignore errors.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Builds a 256-bin intensity histogram of `input` with one global atomic
// per pixel. One thread per pixel; the flat index assumes the image buffer
// is padded to gridDim.x*TILE_SIZE columns (the callers allocate it that
// way), so no bounds check is needed.
__global__ void kernel(unsigned char *input, //generates histogram
unsigned int *histogram){
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
// Row stride = padded width = TILE_SIZE * gridDim.x.
int location = y*TILE_SIZE*gridDim.x+x;
int myItem = input[location];
// % 256 is a no-op for unsigned char values (already 0..255).
int myBin = myItem % 256;
atomicAdd(&(histogram[myBin]),1);
// NOTE(review): no shared memory is used here, so this barrier is not
// needed for this kernel's correctness -- kept as-is.
__syncthreads();
//print histogram with its sum
/*if(location==0)
{
int sum=0;
for(int i=0;i<256;i++)
{
printf("%d %d \n",i,histogram[i]);
sum+=histogram[i];
}
printf("sum=%d thredId=%d \n",sum,location);
}
*/
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Converts histogram counts into bin probabilities: PrRK[k] = hist[k] / N.
// Launched as a single TILE_SIZE x TILE_SIZE block (256 threads = 256 bins);
// the flat index formula matches the other kernels in this file.
__global__ void kernel2(unsigned int size, //generates probability
unsigned int *histogram,
double *PrRK){
  const int col = blockIdx.x * TILE_SIZE + threadIdx.x;
  const int row = blockIdx.y * TILE_SIZE + threadIdx.y;
  const int bin = row * TILE_SIZE * gridDim.x + col;
  PrRK[bin] = (double)histogram[bin] / (double)size;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Maps the cumulative histogram to equalized output levels:
// SK[k] = cdf[k] * alpha (truncated to an unsigned int on assignment).
// Launched as a single TILE_SIZE x TILE_SIZE block (256 threads = 256 bins).
__global__ void kernel3(double *alpha,
unsigned int *cumhistogram,
unsigned int *SK){
  const int col = blockIdx.x * TILE_SIZE + threadIdx.x;
  const int row = blockIdx.y * TILE_SIZE + threadIdx.y;
  // Debug trace (one line per thread) -- slow, kept from the original.
  printf("alpha: %.15f \n", *alpha);
  const int bin = row * TILE_SIZE * gridDim.x + col;
  SK[bin] = cumhistogram[bin] * (*alpha);
  printf("SK is : %d \n", SK[bin]);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Histogram-equalization pipeline: histogram -> probabilities -> CDF (host)
// -> output levels SK. Uses the file-scope device pointers declared above.
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
  // Pad the image up to whole tiles so the grid covers it exactly.
  int gridXSize = 1 + (( width - 1) / TILE_SIZE);
  int gridYSize = 1 + ((height - 1) / TILE_SIZE);
  int XSize = gridXSize*TILE_SIZE;
  int YSize = gridYSize*TILE_SIZE;
  // Both are the same size (CPU/GPU).
  int size = XSize*YSize;
  // Allocate arrays in GPU memory
  checkCuda(cudaMalloc((void**)&input_gpu    , size*sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&output_gpu   , size*sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&histogram    , 256*sizeof(unsigned int)));
  checkCuda(cudaMalloc((void**)&cumhistogram , 256*sizeof(unsigned int)));
  checkCuda(cudaMalloc((void**)&PrRK         , 256*sizeof(double)));
  checkCuda(cudaMalloc((void**)&SK           , 256*sizeof(unsigned int)));
  // BUG FIX: `alpha` is written by cudaMemcpy and read by kernel3 below,
  // but was never allocated -- the copy went through a wild pointer.
  checkCuda(cudaMalloc((void**)&alpha        , sizeof(double)));
  checkCuda(cudaMemset(output_gpu  , 0, size*sizeof(unsigned char)));
  checkCuda(cudaMemset(histogram   , 0, 256*sizeof(unsigned int)));
  checkCuda(cudaMemset(cumhistogram, 0, 256*sizeof(unsigned int)));
  checkCuda(cudaMemset(PrRK        , 0, 256*sizeof(double)));
  checkCuda(cudaMemset(SK          , 0, 256*sizeof(unsigned int)));
  // Copy data to GPU
  checkCuda(cudaMemcpy(input_gpu,
      data,
      size*sizeof(char),
      cudaMemcpyHostToDevice));
  checkCuda(cudaDeviceSynchronize());
  // Full 2D grid for the per-pixel histogram; a single 16x16 block
  // (256 threads == 256 bins) for the per-bin kernels.
  dim3 dimGrid(gridXSize, gridYSize);
  dim3 dimBlock(TILE_SIZE, TILE_SIZE);
  dim3 dimGrid2(1, 1);
  dim3 dimBlock2(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
  float Ktime;
  TIMER_CREATE(Ktime);
  TIMER_START(Ktime);
#endif
  // 1) Histogram of the padded image.
  kernel<<<dimGrid, dimBlock>>>(input_gpu,
      histogram);
  // Equalization scale factor (L-1)/N; stack variable instead of a leaked
  // `new double`.
  double alphaHost = 255/(double)size;
  checkCuda(cudaDeviceSynchronize());
  // 2) Bin probabilities PrRK[k] = hist[k]/N.
  kernel2<<<dimGrid2, dimBlock2>>>(size,
      histogram,
      PrRK);
  // 3) Cumulative histogram on the host (prefix sum is sequential anyway).
  // Stack arrays instead of leaked `new unsigned int[256]`.
  unsigned int hostHistogram[256];
  checkCuda(cudaMemcpy(hostHistogram,
      histogram,
      256*sizeof(unsigned int),
      cudaMemcpyDeviceToHost));
  unsigned int hostCum[256];
  hostCum[0] = hostHistogram[0];
  for (int i = 1; i < 256; i++)
  {
    hostCum[i] = hostCum[i-1] + hostHistogram[i];
  }
  checkCuda(cudaDeviceSynchronize());
  checkCuda(cudaMemcpy(cumhistogram,
      hostCum,
      256*sizeof(unsigned int),
      cudaMemcpyHostToDevice));
  checkCuda(cudaMemcpy(alpha,
      &alphaHost,
      sizeof(double),
      cudaMemcpyHostToDevice));
  // 4) Mapping levels SK[k] = cdf[k] * alpha.
  kernel3<<<dimGrid2, dimBlock2>>>(alpha,
      cumhistogram,
      SK);
  printf("alpha: %.15f \n", alphaHost);
  // Sanity check: bin probabilities must sum to ~1.
  double hostPrRK[256];
  checkCuda(cudaMemcpy(hostPrRK,
      PrRK,
      256*sizeof(double),
      cudaMemcpyDeviceToHost));
  double sum = 0;
  for (int i = 0; i < 256; i++)
  {
    sum += hostPrRK[i];
  }
  printf ("real PrRK2 sum is: %f\n", sum);
  checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
  TIMER_END(Ktime);
  printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
  // NOTE(review): output_gpu is never written by any kernel, so this copy
  // zeroes `data`; kept to preserve the original behavior -- confirm intent.
  checkCuda(cudaMemcpy(data,
      output_gpu,
      size*sizeof(unsigned char),
      cudaMemcpyDeviceToHost));
  // Free all device buffers (cumhistogram/PrRK/SK/alpha previously leaked).
  checkCuda(cudaFree(output_gpu));
  checkCuda(cudaFree(input_gpu));
  checkCuda(cudaFree(histogram));
  checkCuda(cudaFree(cumhistogram));
  checkCuda(cudaFree(PrRK));
  checkCuda(cudaFree(SK));
  checkCuda(cudaFree(alpha));
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// Warm-up run: same allocation/launch sequence as histogram_gpu but with
// only the histogram kernel, so later timed runs exclude one-time CUDA
// initialisation costs.
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
  // Pad the image up to whole tiles so the launch covers it exactly.
  const int tilesX = 1 + (( width - 1) / TILE_SIZE);
  const int tilesY = 1 + ((height - 1) / TILE_SIZE);
  const int paddedW = tilesX * TILE_SIZE;
  const int paddedH = tilesY * TILE_SIZE;
  const int size = paddedW * paddedH;
  // Device buffers (file-scope globals).
  checkCuda(cudaMalloc((void**)&input_gpu , size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&output_gpu, size * sizeof(unsigned char)));
  checkCuda(cudaMalloc((void**)&histogram , 256 * sizeof(unsigned int)));
  checkCuda(cudaMemset(output_gpu, 0, size * sizeof(unsigned char)));
  checkCuda(cudaMemset(histogram , 0, 256 * sizeof(unsigned int)));
  // Upload the image.
  checkCuda(cudaMemcpy(input_gpu,
      data,
      size * sizeof(char),
      cudaMemcpyHostToDevice));
  checkCuda(cudaDeviceSynchronize());
  // One thread per (padded) pixel.
  dim3 grid(tilesX, tilesY);
  dim3 block(TILE_SIZE, TILE_SIZE);
  kernel<<<grid, block>>>(input_gpu,
      histogram);
  checkCuda(cudaDeviceSynchronize());
  // Copy back (output_gpu was only ever zeroed, matching histogram_gpu).
  checkCuda(cudaMemcpy(data,
      output_gpu,
      size * sizeof(unsigned char),
      cudaMemcpyDeviceToHost));
  checkCuda(cudaFree(output_gpu));
  checkCuda(cudaFree(input_gpu));
}
|
19,186 | #include "includes.h"
// Evaluates the (log-)prior for each of `nwl` walkers: pr[i] = 0 when the
// walker's constraint count cn[i] equals `dim`, otherwise INF (a rejection
// sentinel). NHINDX and INF come from includes.h.
// NOTE(review): if INF is a floating-point infinity, the cn[i]==dim branch
// computes 0*INF == NaN; this is only well-defined when INF is a large
// finite constant -- confirm in includes.h.
__global__ void arrayOfPriors1 ( const int dim, const int nwl, const float *cn, const float *nhMd, const float *nhSg, const float *xx, float *pr ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
float sum; //, theta, kk;
if ( i < nwl ) {
// Disabled alternative priors (gamma / gaussian on the NH parameter):
//theta = powf ( nhSg[i], 2 ) / nhMd[i];
//kk = nhMd[i] / theta;
//sum = ( kk - 1 ) * logf ( xx[NHINDX+i*nwl] ) - xx[NHINDX+i*nwl] / theta;
sum = 0; //powf ( ( xx[NHINDX+i*nwl] - nhMd[i] ) / nhSg[i], 2 );
// Branchless select between the accepted (sum) and rejected (INF) value.
pr[i] = ( cn[i] == dim ) * sum + ( cn[i] < dim ) * INF;
}
}
19,187 | #include <chrono>
#include <iostream>
#include <ncurses.h>
#include <thread>
// Prints the caller's context plus the pending CUDA error, then exits.
// Invoked through the abortError(msg) macro, which supplies function/line.
__attribute__((noinline)) void _abortError(const char* msg, const char* fname,
                                           int line)
{
  const cudaError_t err = cudaGetLastError();
  std::clog << fname << ": " << "line: " << line << ": " << msg << '\n';
  std::clog << "Error " << cudaGetErrorName(err) << ": "
            << cudaGetErrorString(err) << '\n';
  std::exit(1);
}
#define abortError(msg) _abortError(msg, __FUNCTION__, __LINE__)
// One Game of Life step on a toroidal (wrap-around) grid: one thread per
// cell. `pitch`/`pitch_out` are the byte strides of the pitched input and
// output allocations (rows are char, so they double as index strides).
__global__ void compute_iteration(char* buffer, char* out_buffer, size_t pitch,
                                  size_t pitch_out, int width, int height)
{
  const int x = blockDim.x * blockIdx.x + threadIdx.x;
  const int y = blockDim.y * blockIdx.y + threadIdx.y;
  if (x >= width || y >= height)
    return;
  // Wrap neighbour coordinates at the edges (torus topology).
  int left_x = (x - 1 + width) % width;
  int right_x = (x + 1) % width;
  int up_y = (y - 1 + height) % height;
  int down_y = (y + 1) % height;
  char n_alive = buffer[up_y * pitch + left_x] + buffer[up_y * pitch + x]
      + buffer[up_y * pitch + right_x] + buffer[y * pitch + left_x]
      + buffer[y * pitch + right_x] + buffer[down_y * pitch + left_x]
      + buffer[down_y * pitch + x] + buffer[down_y * pitch + right_x];
  // BUG FIX: the output row stride is pitch_out; the original used `pitch`,
  // which is only correct when both allocations happen to share a pitch.
  out_buffer[y * pitch_out + x] =
      n_alive == 3 || (buffer[y * pitch + x] && n_alive == 2);
}
// Runs `n_iterations` Game of Life steps, ping-ponging between the two
// pitched device buffers. All parameters are taken by value, so the caller's
// pointers are untouched; after an odd number of iterations the final state
// lives in the caller's out_dev_buffer, after an even number in dev_buffer.
static void run_compute_iteration(char* dev_buffer, char* out_dev_buffer,
                                  size_t pitch, size_t pitch_out, int width,
                                  int height,
                                  int n_iterations)
{
  constexpr int block_size = 32;
  // Ceil-div so partial tiles at the right/bottom edges are covered.
  int w = std::ceil(1.f * width / block_size);
  int h = std::ceil(1.f * height / block_size);
  dim3 dimGrid(w, h);
  dim3 dimBlock(block_size, block_size);
  for (int i = 0; i < n_iterations; ++i)
  {
    compute_iteration<<<dimGrid, dimBlock>>>(
        dev_buffer, out_dev_buffer, pitch, pitch_out, width, height);
    // Ping-pong the buffers. BUG FIX: each pitch must stay paired with its
    // buffer, so the pitches are swapped together with the pointers.
    std::swap(dev_buffer, out_dev_buffer);
    std::swap(pitch, pitch_out);
  }
  // Peek (not Get) so a sticky error remains visible to the caller too.
  if (cudaPeekAtLastError())
    abortError("Computation error");
}
// Runs `n_iterations` of Game of Life on the GPU and writes the final grid
// back into `buffer` (row-major width x height chars, in place).
void gol_gpu(char* buffer, int width, int height, int n_iterations)
{
  cudaError_t rc = cudaSuccess;
  // Pitched allocations keep rows aligned for coalesced access.
  char* dev_buffer;
  char* out_dev_buffer;
  size_t pitch;
  size_t pitch_out;
  rc = cudaMallocPitch(&dev_buffer, &pitch, width * sizeof(char), height);
  if (rc)
    abortError("Fail buffer allocation");
  rc = cudaMallocPitch(&out_dev_buffer, &pitch_out, width * sizeof(char),
                       height);
  if (rc)
    abortError("Fail output buffer allocation");
  if (cudaMemcpy2D(dev_buffer, pitch, buffer, width * sizeof(char),
                   width * sizeof(char), height, cudaMemcpyHostToDevice))
    abortError("Fail memcpy host to device");
  run_compute_iteration(dev_buffer, out_dev_buffer, pitch, pitch_out, width,
                        height, n_iterations);
  // BUG FIX: the result was never copied back to the host before freeing.
  // run_compute_iteration ping-pongs by value, so after an even number of
  // iterations the final state is in dev_buffer, after an odd number in
  // out_dev_buffer.
  char* result = (n_iterations % 2 == 0) ? dev_buffer : out_dev_buffer;
  size_t result_pitch = (n_iterations % 2 == 0) ? pitch : pitch_out;
  if (cudaMemcpy2D(buffer, width * sizeof(char), result, result_pitch,
                   width * sizeof(char), height, cudaMemcpyDeviceToHost))
    abortError("Fail memcpy device to host");
  rc = cudaFree(dev_buffer);
  if (rc)
    abortError("Unable to free buffer");
  rc = cudaFree(out_dev_buffer);
  if (rc)
    abortError("Unable to free output buffer");
}
|
19,188 | // Exemplo para o curso de Super Computacao
// Criado por: Luciano P. Soares
#include <stdio.h>
#include <stdlib.h>
/* Rotina para somar dois vetores na GPU */
/* Adds two vectors element-wise on the GPU: c[i] = a[i] + b[i]. */
__global__ void add(double *a, double *b, double *c, int N) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // Guard: the grid may be larger than N, so never index past the vectors.
  if (idx < N)
    c[idx] = a[idx] + b[idx];
}
/* Programa cria dois vetores e soma eles em GPU */
/* Creates two vectors, adds them on the GPU and spot-checks the result. */
int main() {
  double *h_a, *h_b, *h_c;   // host vectors
  double *d_a, *d_b, *d_c;   // device vectors
  int blocksize, i, n;
  cudaError_t error;
  n=1<<23;                   // 8M elements
  // Allocate host memory
  h_a = (double *)malloc(n*sizeof(double));
  h_b = (double *)malloc(n*sizeof(double));
  h_c = (double *)malloc(n*sizeof(double));
  // Fill the inputs: a[i] = i and b[i] = n - i, so every sum equals n
  for (i = 0; i < n; i++) {
    h_a[i] = (double)i;
    h_b[i] = (double)n-i;
  }
  // Allocate device memory, checking every call
  error = cudaMalloc((void **)&d_a,n*sizeof(double));
  if(error!=cudaSuccess) {
    printf("Memory Allocation CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  error = cudaMalloc((void **)&d_b,n*sizeof(double));
  if(error!=cudaSuccess) {
    printf("Memory Allocation CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  error = cudaMalloc((void **)&d_c,n*sizeof(double));
  if(error!=cudaSuccess) {
    printf("Memory Allocation CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  // Copy the inputs host -> device
  error = cudaMemcpy(d_a, h_a, n*sizeof(double), cudaMemcpyHostToDevice);
  if(error!=cudaSuccess) {
    printf("Memory Copy CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  error = cudaMemcpy(d_b, h_b, n*sizeof(double), cudaMemcpyHostToDevice);
  if(error!=cudaSuccess) {
    printf("Memory Copy CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  // Launch the kernel.
  blocksize = 256;
  // BUG FIX: the grid size previously hard-coded /256; derive it from
  // blocksize so changing blocksize cannot silently skip elements.
  add<<<((n-1)/blocksize + 1),blocksize>>>(d_a,d_b,d_c,n);
  // Kernel launches return no status; query the launch error explicitly.
  error = cudaGetLastError();
  if(error!=cudaSuccess) {
    printf("Kernel Launch CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  // Copy the result device -> host (cudaMemcpy synchronizes implicitly)
  error = cudaMemcpy(h_c, d_c, n*sizeof(double), cudaMemcpyDeviceToHost);
  if(error!=cudaSuccess) {
    printf("Memory Copy CUDA failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
  // Free device memory
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  // Print 8 evenly spaced samples to check the results
  for(i=0;i<n;i++) {
    if(!(i%(n/8))) {
      printf("a[%d] + b[%d] = c[%d] => ",i,i,i);
      printf("%6.1f + %6.1f = %6.1f\n",h_a[i],h_b[i],h_c[i]);
    }
  }
  // Free host memory
  free(h_a);
  free(h_b);
  free(h_c);
  return 0;
}
|
19,189 | #include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
// RAII wrapper around a pair of CUDA events for millisecond GPU timing.
// Usage: timer.Start(); ...work...; timer.Stop(); float ms = timer.Elapsed();
struct GpuTimer
{
	cudaEvent_t start;
	cudaEvent_t stop;
	GpuTimer()
	{
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
	}
	~GpuTimer()
	{
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
	}
	// Records the start event on the default stream.
	void Start()
	{
		cudaEventRecord(start, 0);
	}
	// Records the stop event on the default stream (asynchronous).
	void Stop()
	{
		cudaEventRecord(stop, 0);
	}
	// Blocks until the stop event has completed, then returns the elapsed
	// time between the two events in milliseconds.
	float Elapsed()
	{
		float elapsed;
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsed, start, stop);
		return elapsed;
	}
};
// Reads an ASCII PPM ("P3") image with max value <= 255. On success, sets
// width/height and allocates `pixels` (caller frees). Exits on any error.
void readPnm(char * fileName,
int &width, int &height, uchar3 * &pixels)
{
	FILE * f = fopen(fileName, "r");
	if (f == NULL)
	{
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	char type[3];
	// BUG FIX: bound the read to 2 chars + NUL; a bare %s could overflow
	// the 3-byte buffer on a malformed file.
	fscanf(f, "%2s", type);
	if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
	{
		fclose(f);
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	fscanf(f, "%i", &width);
	fscanf(f, "%i", &height);
	int max_val;
	fscanf(f, "%i", &max_val);
	if (max_val > 255) // In this exercise, we assume 1 byte per value
	{
		fclose(f);
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	// One RGB triple per pixel, row-major.
	for (int i = 0; i < width * height; i++)
		fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
	fclose(f);
}
// Writes `pixels` (row-major width x height RGB) as an ASCII PPM ("P3")
// file with max channel value 255. Exits on open failure.
void writePnm(uchar3 * pixels, int width, int height,
char * fileName)
{
	FILE * f = fopen(fileName, "w");
	if (f == NULL)
	{
		printf("Cannot write %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	// Header: magic, width, height, max channel value.
	fprintf(f, "P3\n%i\n%i\n255\n", width, height);
	const int nPixels = width * height;
	for (int i = 0; i < nPixels; i++)
		fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
	fclose(f);
}
// Convolves the image with a filterWidth x filterWidth kernel, one thread
// per output pixel. Out-of-image taps are clamped to the nearest edge
// pixel. Results are written with implicit float -> unsigned char
// truncation (no rounding/clamping), which assumes the filter is
// normalized so sums stay in [0, 255].
__global__ void blurImgKernel(uchar3 * inPixels, int width, int height,
float * filter, int filterWidth,
uchar3 * outPixels)
{
// Get data "row" and "column" indices
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
// The center of the filter
int centerIdx = filterWidth >> 1;
// Convolution (guard: the grid may overhang the image edges)
if (ix < width && iy < height)
{
// Temporary summation
float sumX = 0.0f, sumY = 0.0f, sumZ = 0.0f;
// Instead of indexing the filter from 0
// We start from -centerIdx to +centerIdx
// This helps computing data indices more naturally
for (int rFilter = -centerIdx; rFilter < centerIdx + 1; rFilter++)
{
for (int cFilter = -centerIdx; cFilter < centerIdx + 1; cFilter++)
{
// The filter element to be used
// Plus centerIdx to do the right 0-indexing
int iFilter = (rFilter + centerIdx) * filterWidth + (cFilter + centerIdx);
// rPatched and cPatched are pseudo indices
// Assume that we are extending the original image "centerIdx" wider on each edge
int rPatched = iy + rFilter, cPatched = ix + cFilter;
// rConv and cConv are the actual indices to be used for the convolution
// Here we make use of the nested ternary operator in C/C++
// If (Patched is inside the original image) then Conv = Patched (internal)
// Else If (Patched < 0) then Conv = 0 (lower bound)
// Else Conv = height/width - 1 (upper bound)
int rConv = (rPatched >= 0 && rPatched <= height - 1) ? rPatched : (rPatched < 0) ? 0 : (height - 1);
int cConv = (cPatched >= 0 && cPatched <= width - 1) ? cPatched : (cPatched < 0) ? 0 : (width - 1);
// The image element to be multiplied at this step
int iConv = rConv * width + cConv;
// A single multiplication
sumX += inPixels[iConv].x * filter[iFilter];
sumY += inPixels[iConv].y * filter[iFilter];
sumZ += inPixels[iConv].z * filter[iFilter];
}
}
// The actual output element (float -> uchar truncates toward zero)
int i = iy * width + ix;
outPixels[i].x = sumX;
outPixels[i].y = sumY;
outPixels[i].z = sumZ;
}
}
// Blurs the image either on the host (reference path) or on the GPU, and
// reports the processing time. Both paths clamp out-of-image filter taps to
// the nearest edge pixel and truncate the float sums to unsigned char.
void blurImg(uchar3 * inPixels, int width, int height, float * filter, int filterWidth,
uchar3 * outPixels,
bool useDevice=false, dim3 blockSize=dim3(1, 1))
{
	GpuTimer timer;
	timer.Start();
	if (useDevice == false)
	{
		// Host reference implementation.
		// The center of the filter
		int centerIdx = filterWidth >> 1;
		for (int r = 0; r < height; r++)
		{
			// BUG FIX: columns run over `width` (previously `height`, which
			// produced wrong results for non-square images and wrote out of
			// bounds whenever height > width).
			for (int c = 0; c < width; c++)
			{
				// Convolution: accumulate each channel in float.
				float sumX = 0.0f, sumY = 0.0f, sumZ = 0.0f;
				// Index the filter from -centerIdx..+centerIdx so the data
				// indices fall out naturally.
				for (int rFilter = -centerIdx; rFilter < centerIdx + 1; rFilter++)
				{
					for (int cFilter = -centerIdx; cFilter < centerIdx + 1; cFilter++)
					{
						// 0-based filter element index.
						int iFilter = (rFilter + centerIdx) * filterWidth + (cFilter + centerIdx);
						// Pseudo indices into an image extended centerIdx
						// pixels beyond each edge.
						int rPatched = r + rFilter, cPatched = c + cFilter;
						// Clamp-to-edge: inside -> itself, below 0 -> 0,
						// past the end -> last row/column.
						int rConv = (rPatched >= 0 && rPatched <= height - 1) ? rPatched : (rPatched < 0) ? 0 : (height - 1);
						int cConv = (cPatched >= 0 && cPatched <= width - 1) ? cPatched : (cPatched < 0) ? 0 : (width - 1);
						// The image element multiplied at this tap.
						int iConv = rConv * width + cConv;
						sumX += inPixels[iConv].x * filter[iFilter];
						sumY += inPixels[iConv].y * filter[iFilter];
						sumZ += inPixels[iConv].z * filter[iFilter];
					}
				}
				// Output element (float -> uchar truncates toward zero).
				int i = r * width + c;
				outPixels[i].x = sumX;
				outPixels[i].y = sumY;
				outPixels[i].z = sumZ;
			}
		}
	}
	else // Use device
	{
		cudaDeviceProp devProp;
		cudaGetDeviceProperties(&devProp, 0);
		printf("GPU name: %s\n", devProp.name);
		printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor);
		// Allocate device memories
		uchar3 *d_inPixels, *d_outPixels;
		float *d_filter;
		CHECK(cudaMalloc(&d_inPixels, width * height * sizeof(uchar3)));
		CHECK(cudaMalloc(&d_outPixels, width * height * sizeof(uchar3)));
		CHECK(cudaMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)))
		// Copy data to device memories
		CHECK(cudaMemcpy(d_inPixels, inPixels, width * height * sizeof(uchar3), cudaMemcpyHostToDevice));
		CHECK(cudaMemcpy(d_filter, filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
		// Ceil-div grid so partial tiles at the edges are covered.
		dim3 gridSize((width - 1)/blockSize.x + 1, (height - 1)/blockSize.y + 1);
		blurImgKernel<<<gridSize, blockSize>>>(d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
		// Kernel launches return no status; query the launch error explicitly.
		cudaError_t errorCode = cudaGetLastError();
		if (cudaSuccess != errorCode)
		{
			printf("Error: kernel did not run properly\n");
			printf("Code: %d, Reason: %s\n\n", errorCode, cudaGetErrorString(errorCode));
			return;
		}
		// Copy result from device memories (blocking copy doubles as a sync).
		CHECK(cudaMemcpy(outPixels, d_outPixels, width*height*sizeof(uchar3), cudaMemcpyDeviceToHost));
		// Free device memories
		CHECK(cudaFree(d_inPixels));
		CHECK(cudaFree(d_outPixels));
		CHECK(cudaFree(d_filter));
	}
	timer.Stop();
	float time = timer.Elapsed();
	printf("Processing time (%s): %f ms\n",
	useDevice == true? "use device" : "use host", time);
}
// Mean absolute per-channel difference between two images of n pixels.
// Each uchar3 channel is widened to int before subtracting so the
// difference cannot wrap; the average is taken over all 3*n channels.
float computeError(uchar3 * a1, uchar3 * a2, int n)
{
	float total = 0;
	for (int px = 0; px < n; px++)
	{
		// Accumulate channel-by-channel in the same order as before
		// so the float summation is bit-identical.
		total += abs((int)a1[px].x - (int)a2[px].x);
		total += abs((int)a1[px].y - (int)a2[px].y);
		total += abs((int)a1[px].z - (int)a2[px].z);
	}
	return total / (n * 3);
}
// Join two C strings into one freshly malloc'd buffer.
// The caller owns (and must free) the returned pointer.
char * concatStr(const char * s1, const char * s2)
{
	size_t len1 = strlen(s1);
	size_t len2 = strlen(s2);
	char * joined = (char *)malloc(len1 + len2 + 1);
	memcpy(joined, s1, len1);
	memcpy(joined + len1, s2, len2 + 1); // +1 copies s2's terminating NUL
	return joined;
}
// Entry point.
// Usage: prog <input.pnm> <outBase> <correct.pnm> [blockX blockY]
// Blurs the input with a 9x9 mean filter on host and device, compares both
// against a reference image, and writes <outBase>_host.pnm / _device.pnm.
int main(int argc, char ** argv)
{
	if (argc != 4 && argc != 6)
	{
		printf("The number of arguments is invalid\n");
		return EXIT_FAILURE;
	}
	// Read input image file
	int width, height;
	uchar3 * inPixels;
	readPnm(argv[1], width, height, inPixels);
	printf("Image size (width x height): %i x %i\n\n", width, height);
	// Read correct output image file
	int correctWidth, correctHeight;
	uchar3 * correctOutPixels;
	readPnm(argv[3], correctWidth, correctHeight, correctOutPixels);
	if (correctWidth != width || correctHeight != height)
	{
		printf("The shape of the correct output image is invalid\n");
		return EXIT_FAILURE;
	}
	// Set up a simple 9x9 box filter (every weight = 1/81) for blurring
	int filterWidth = 9;
	float * filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
	for (int filterR = 0; filterR < filterWidth; filterR++)
	{
		for (int filterC = 0; filterC < filterWidth; filterC++)
		{
			filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
		}
	}
	// Blur input image using host
	uchar3 * hostOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	blurImg(inPixels, width, height, filter, filterWidth, hostOutPixels);
	// Compute mean absolute error between host result and correct result
	float hostErr = computeError(hostOutPixels, correctOutPixels, width * height);
	printf("Error: %f\n\n", hostErr);
	// Blur input image using device
	uchar3 * deviceOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	dim3 blockSize(32, 32); // Default
	if (argc == 6)
	{
		blockSize.x = atoi(argv[4]);
		blockSize.y = atoi(argv[5]);
	}
	blurImg(inPixels, width, height, filter, filterWidth, deviceOutPixels, true, blockSize);
	// Compute mean absolute error between device result and correct result
	float deviceErr = computeError(deviceOutPixels, correctOutPixels, width * height);
	printf("Error: %f\n\n", deviceErr);
	// Write results to files.
	// NOTE: strtok mutates argv[2] in place (truncates at the first '.').
	char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
	char * hostOutName = concatStr(outFileNameBase, "_host.pnm");
	char * deviceOutName = concatStr(outFileNameBase, "_device.pnm");
	writePnm(hostOutPixels, width, height, hostOutName);
	writePnm(deviceOutPixels, width, height, deviceOutName);
	// Free memories (fix: the two concatStr results used to leak)
	free(hostOutName);
	free(deviceOutName);
	free(inPixels);
	free(correctOutPixels);
	free(hostOutPixels);
	free(deviceOutPixels);
	free(filter);
	return EXIT_SUCCESS; // fix: explicit success status
}
|
19,190 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#define DEBUG 0
#define ENUM_NUM 19 // the number of loops in each thread
#define UNKNOWN_NUM 64 // the number of unknowns
#define POLY_NUM 64 // the number of linear polynomials
#define PARA_NUM 64 // the number of parameters
#define NONLINEAR_NUM 64 // the number of nonlinear polynomials
#define SOL_MAX_NUM 200
#define RESULT_MAX_NUM 5
// for GPU
#define BLOCK_NUM 32 //2^5
#define THREAD_NUM 256 // 2^8
#define THREADS_SHIFT 13 // (5+8)
typedef long value_t; // to save values of variables.
//typedef unsigned long constpart_t; // the part with no parameters.
typedef unsigned long linearpart_t; // to save 32 unknowns and 1 contants.
typedef unsigned long squarepart_t;
typedef unsigned long oripoly_t;
typedef unsigned char UINT8;
typedef unsigned long long UINT64;
//for GPU
__device__ linearpart_t d_linear_mat[ENUM_NUM * POLY_NUM * 2];
__device__ oripoly_t d_polys_mat[NONLINEAR_NUM * (POLY_NUM + UNKNOWN_NUM + 1) * 3];
__device__ squarepart_t d_square_mat[ENUM_NUM * POLY_NUM];
__device__ value_t d_var_all[2560];
/**
 * Print `len` low bits of `val` to stdout, LSB first, with a space after
 * every 5th bit for readability. Host-only debugging helper.
 */
static inline void binary_print(value_t val, int len) {
    for (int bit = 0; bit < len; bit++) {
        printf("%c", (val & ((value_t)1 << bit)) ? '1' : '0');
        if (bit % 5 == 4) {
            printf(" "); // group output in runs of five bits
        }
    }
}
/**
 * Return the index of the highest set bit of `val` within the low `len`
 * bits, or -1 when none of those bits is set. Usable on host and device.
 */
static inline __host__ __device__ int largestpos(value_t val, int len) {
    int bit = len;
    while (bit-- > 0) {
        if (val & ((value_t) 1 << bit)) {
            return bit;
        }
    }
    return -1;
}
/**
 * Highest set bit of a value split across two 64-bit words (val0 = bits
 * 0..63, val1 = bits 64..127), searching only the low `len` bits.
 * Returns -1 when no bit is set. Usable on host and device.
 */
static inline __host__ __device__ int largestpos_2(value_t val0, value_t val1, int len) {
    // Check the high word first when len reaches into it.
    if (len > 64 && len <= 128) {
        int hi = largestpos(val1, len - 64);
        if (hi > -1) {
            return hi + 64;
        }
    }
    // Either len fits in the low word, or the high word was all zero.
    return largestpos(val0, 64);
}
/**
 * Solve a linear system over GF(2) by Gaussian elimination, on CPU.
 *
 * Each row of working_mat is a 128-bit bitmask split into two 64-bit words:
 * bit 0 of word 0 is the constant term, bits 1..unknown_num are unknown
 * coefficients. The matrix is reduced IN PLACE.
 *
 * Returns the number of solutions written to `solutions` (0 when the system
 * is inconsistent, i.e. some row reduces to "1 = 0").
 * NOTE(review): sol_num doubles per free variable and is never clamped to
 * SOL_MAX_NUM (200) -- assumes rank deficiency <= 7; confirm with callers.
 * NOTE(review): `poly_num` is unused; loops iterate over POLY_NUM directly.
 */
static inline value_t gauss_host(linearpart_t working_mat[POLY_NUM][2],
        const int poly_num, const int unknown_num, value_t solutions[SOL_MAX_NUM]) {
    // pos_arr[pi] = pivot bit position of row pi. Entries for all-zero rows
    // stay uninitialized, but those rows are skipped everywhere below.
    int pos_arr[POLY_NUM];
    int rank = 0;
    // Forward elimination (no row swapping; pivots need not be ordered).
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_2(working_mat[pi][0],working_mat[pi][1], unknown_num + 1);
        rank++;
        // Pivot at bit 0 means the row is just the constant 1: inconsistent.
        if (pos_arr[pi] == 0) {
            return 0;
        }
        // XOR this row into every later row that has the pivot bit set.
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if(working_mat[j][pos_arr[pi]/64] & ((linearpart_t)1 << (pos_arr[pi] % 64))){
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    // back reduced: clear each pivot column above its pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    if (rank == unknown_num) {
        // Full rank: only one solution. Read each unknown's value straight
        // off its pivot row's constant bit (bit 0).
        solutions[0] = 0;
        ;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t)1) {
                // Unknown (pos_arr[pi]-1) is forced to 1.
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        return 1;
    } else {
        // Rank-deficient: enumerate all assignments of the free variables.
        solutions[0] = 0;
        value_t sol_num = 1;
        // appear[b] = true iff bit b is a pivot of some row.
        bool appear[UNKNOWN_NUM + 1] = { 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        // Base solution: all free variables = 0.
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t)1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        // For each free variable i, duplicate every existing solution with
        // that variable flipped to 1 (and propagate into pivot variables).
        for (int i = 1; i < UNKNOWN_NUM+1; i++) {
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
                }
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                        continue;
                    }
                    // Branch-free trick: the loop bound is sol_num when row
                    // pi depends on bit i, otherwise 0 (loop does not run).
                    if(i < 64){
                        for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }else{
                        // `<< i-64` parses as `<< (i - 64)` -- shift into the high word.
                        for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
/**
 * Solve a linear system over GF(2) by Gaussian elimination, on GPU.
 * Device twin of gauss_host (same row encoding: bit 0 of word 0 is the
 * constant term, bits 1..unknown_num are unknown coefficients); only the
 * parameter order differs. working_mat is reduced IN PLACE.
 * Returns the number of solutions written to `solutions` (0 = inconsistent).
 * NOTE(review): the per-thread solutions/working_mat arrays live in local
 * memory; register/spill pressure is significant -- presumably accepted.
 */
static inline __device__ value_t gauss(value_t solutions[SOL_MAX_NUM], linearpart_t working_mat[POLY_NUM][2],
        const int poly_num, const int unknown_num) {
    // bear revised
    int pos_arr[POLY_NUM]; // bear revised -- pivot bit position per row
    int rank = 0;
    // Forward elimination.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_2(working_mat[pi][0], working_mat[pi][1],unknown_num + 1);
        rank++;
        // Pivot at bit 0 = constant row "1 = 0": no solution.
        if (pos_arr[pi] == 0) {
            return 0;
        }
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    // back reduced: clear pivot columns above each pivot row.
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                    & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
            }
        }
    }
    if (rank == unknown_num) {
        // Full rank: only one solution.
        solutions[0]= 0;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        return 1;
    } else {
        // Rank-deficient: enumerate the free variables (see gauss_host).
        solutions[0] = 0;
        value_t sol_num = 1;
        bool appear[UNKNOWN_NUM + 1] = { 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0] ^= ((value_t)1 << (pos_arr[pi]-1));
            }
        }
        // duplicate solutions: one doubling per free variable.
        for (int i = 1; i < UNKNOWN_NUM + 1; i++) {
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    solutions[j + sol_num] = (solutions[j]) ^ ((value_t)1 << (i-1));
                }
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 ) {
                        continue;
                    }
                    // Loop bound is 0 when row pi does not depend on bit i.
                    if(i < 64){
                        for (int j = 0; j < sol_num * ((working_mat[pi][0] & (((linearpart_t) 1) << i)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }else{
                        for (int j = 0; j < sol_num * ((working_mat[pi][1] & (((linearpart_t) 1) << i-64)) != 0); j++) {
                            solutions[j + sol_num] ^= ((value_t) 1 << (pos_arr[pi] - 1));
                        }
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
/**
 * Kernel: traverse the values of parameters to obtain matrices of the linear
 * system, solve each system, and count/record the satisfying assignments.
 *
 * Launch layout: 1-D grid, BLOCK_NUM x THREAD_NUM; each thread owns one
 * parameter slice (index thidx) and walks its 2^ENUM_NUM low-parameter
 * assignments in Gray-code order so that each step only XORs one
 * precomputed update row (d_linear_mat / d_square_mat, in __constant__-style
 * __device__ globals) into the running matrices.
 *
 * Outputs per thread: d_sol_total[thidx] = total solutions seen,
 * d_bound[thidx] = best number of satisfied nonlinear equations,
 * result[thidx * 2*RESULT_MAX_NUM ..] = up to RESULT_MAX_NUM (val, sol) pairs
 * achieving that bound.
 */
__global__ void solveLinear(const linearpart_t *d_working_mat_copy,
        const squarepart_t *d_const_mat, value_t *d_val, char *d_bound, value_t *d_sol_total,value_t* result) {
    int thidx = blockDim.x * blockIdx.x + threadIdx.x;
    value_t val = d_val[thidx];        // current parameter assignment
    char bound = d_bound[thidx];       // best score found so far (seeded by host)
    value_t res[2 * RESULT_MAX_NUM];   // local copy of the best (val, sol) pairs
    char res_num = 0;
    linearpart_t working_mat[POLY_NUM][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
    linearpart_t working_mat_copy[POLY_NUM][2];
    squarepart_t const_mat[POLY_NUM];
    d_sol_total[thidx] = 0;
    oripoly_t cstpoly[3] = {0, 0, 0};
    //copy data from device global memory into per-thread local storage
    for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
        res[i] = 0;
    }
    res[0] = result[thidx * 2 * RESULT_MAX_NUM];
    res[1] = result[thidx * 2 * RESULT_MAX_NUM + 1];
    for (int i = 0; i < POLY_NUM; i++) {
        working_mat_copy[i][0] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2];
        working_mat_copy[i][1] = d_working_mat_copy[thidx * POLY_NUM * 2 + i*2 + 1];
        const_mat[i] = d_const_mat[thidx * POLY_NUM + i];
    }
    // main loop: enumerate all 2^ENUM_NUM low-parameter assignments.
    for (value_t count = 1; count < (1 << ENUM_NUM); count++) {
        // generate the next gray code: pos = index of the lowest set bit of
        // count (bit-reverse + find-first-set), i.e. the single bit that flips.
        int pos = 64-__ffsll(__brevll(count ^ (count - 1)));
        val = val ^ ((value_t) 1 << pos);
        for (int pi = 0; pi < POLY_NUM; pi++) {
            // Incrementally apply the one-bit update to the linear rows and
            // the quadratic-term masks.
            working_mat_copy[pi][0] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2];
            working_mat_copy[pi][1] ^= d_linear_mat[pos * POLY_NUM * 2 + pi * 2 + 1];
            const_mat[pi] ^= d_square_mat[pos * POLY_NUM + pi];
            working_mat[pi][0] = working_mat_copy[pi][0];
            working_mat[pi][1] = working_mat_copy[pi][1];
            // Fold the parameter-dependent quadratic contribution into the
            // constant bit: parity of (const_mat[pi] AND val).
            value_t w = const_mat[pi] & val;
            working_mat[pi][0] ^= (bool)((__popcll((unsigned long long int)w)) & (value_t) 1);
        }
        value_t solutions[SOL_MAX_NUM];
        value_t sol_num = 0;
        // gaussian elimination over GF(2)
        sol_num = gauss(solutions, working_mat, POLY_NUM, UNKNOWN_NUM);
        d_sol_total[thidx] += sol_num;
        // verify on 3 round keccak: score each solution against the
        // nonlinear polynomial set.
        for(int s = 0;s < sol_num;s++){
            int zero_num = 0;
            int one_num = 0;
            int op;
            for (op = 0; op < NONLINEAR_NUM; op++) {
                // Start from the constant row of polynomial op.
                cstpoly[0] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3];
                cstpoly[1] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 1];
                cstpoly[2] = d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + UNKNOWN_NUM) * 3 + 2];
                // for parameters: XOR in the row of every set parameter bit.
                for (int pa = 0; pa < PARA_NUM; pa++) {
                    if (val & ((value_t) 1 << pa)) {
                        cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3];
                        cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 1];
                        cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM - pa -1) * 3 + 2];
                    }
                }
                // for unknowns: XOR in the row of every set solution bit.
                for (int un = 0; un < UNKNOWN_NUM; un++) {
                    if (solutions[s] & ((value_t) 1 << un)) {
                        cstpoly[0] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3];
                        cstpoly[1] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 1];
                        cstpoly[2] ^= d_polys_mat[op * (PARA_NUM + UNKNOWN_NUM + 1) * 3 + (PARA_NUM + un) * 3 + 2];
                    }
                }
                // evaluate: overall parity of the masked words decides the
                // polynomial's value.
                cstpoly[0] = cstpoly[0] & val;
                cstpoly[1] = cstpoly[1] & solutions[s];
                cstpoly[2] = cstpoly[2] & ((oripoly_t)1);
                value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
                // Parity fold: bit 0 of w ends up as XOR of all 64 bits.
                w = (w) ^ (w >> 32);
                w = (w) ^ (w >> 16);
                w = (w) ^ (w >> 8);
                w = (w) ^ (w >> 4);
                w = (w) ^ (w >> 2);
                w = (w) ^ (w >> 1);
                if (w & (value_t) 1) {
                    zero_num ++;
                    // Early exit: cannot beat the current bound any more.
                    if(zero_num > NONLINEAR_NUM - bound){
                        break;
                    }
                }else{
                    one_num++;
                }
            }
            // Completed all polynomials and matched/beat the bound?
            if(zero_num + one_num == NONLINEAR_NUM && one_num >= bound){
                if(one_num > bound){
                    // Strictly better: reset the recorded result set.
                    bound = one_num;
                    res_num = 0;
                    for(int ir = 0; ir < 2 * RESULT_MAX_NUM; ir++ ){
                        res[ir] = 0;
                    }
                }
                if(res_num < RESULT_MAX_NUM){
                    res[res_num * 2] = val;
                    res[res_num * 2 + 1 ] = solutions[s];
                    res_num ++;
                }
                //	printf("\nval:%lu,sol:%lu,count:%d(bound:%d) thidx:%d  \n",val,solutions[s],one_num,bound, thidx);
            }
        }
    }
    // Publish this thread's final bound and best pairs.
    d_bound[thidx] = bound;
    for(int i = 0; i < 2 * RESULT_MAX_NUM; i++){
        result[thidx * 2 * RESULT_MAX_NUM + i] = res[i];
    }
}
/**
 * Host driver: reads the linearized Keccak-224 polynomial matrices from
 * ./data/, handles the all-zero low-parameter slice on the CPU to seed the
 * bound, then launches solveLinear on the GPU and sorts/appends the merged
 * per-thread results to result.txt.
 * argv[1] = starting value (shifted by THREADS_SHIFT), argv[2] = GPU id.
 */
int main(int argc, char** argv) {
    // fix: argv[1]/argv[2] were dereferenced without any argc check.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <start_value> <gpu_device>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    char bound = 0; // best "satisfied nonlinear equations" count so far
    const int para_num = PARA_NUM;
    const int enum_num = ENUM_NUM;
    const int ori_num = NONLINEAR_NUM;
    value_t set_val = atol(argv[1])<<THREADS_SHIFT;
    const int poly_num = POLY_NUM;
    const int unknown_num = UNKNOWN_NUM;
    linearpart_t linear_mat[para_num][poly_num][2];
    linearpart_t working_mat[poly_num][2]; // initialized as the const part of linear matrix. also used as the results of linear part.
    linearpart_t working_mat_copy[poly_num][2];
    linearpart_t working_mat_file[poly_num][2];
    squarepart_t square_mat[para_num][poly_num];
    squarepart_t const_mat[poly_num]; // used to compute the const part from square polys.
    oripoly_t polys[ori_num][para_num + unknown_num + 1][3];
    oripoly_t cstpoly[3];
    cudaSetDevice(atoi(argv[2])); // set the GPU device
    // read the matrix files
    FILE *in1 = fopen("./data/linear_mat224.txt", "r+");
    FILE *in2 = fopen("./data/square_mat224.txt", "r+");
    FILE *in3 = fopen("./data/polys_mat224.txt", "r+");
    FILE *in4 = fopen("./data/working_mat224.txt", "r+");
    // fix: fopen results were used unchecked (fscanf on NULL is UB).
    if (in1 == NULL || in2 == NULL || in3 == NULL || in4 == NULL) {
        fprintf(stderr, "Failed to open matrix files under ./data/\n");
        exit(EXIT_FAILURE);
    }
    char c1, c2, c3, c4;
    // Parse '0'/'1' characters, skipping any separator characters.
    for (int i = 0; i < para_num; i++) {
        for (int j = 0; j < poly_num; j++) {
            linear_mat[i][j][0] = 0;
            linear_mat[i][j][1] = 0;
            square_mat[i][j] = 0;
            // 128 bits per linear row, split into two 64-bit words.
            for (int k = 0; k < 128; k++) {
                fscanf(in1, "%c", &c1);
                while (c1 != '0' && c1 != '1') {
                    fscanf(in1, "%c", &c1);
                }
                if (c1 == '1') {
                    linear_mat[i][j][k/64] ^= ((linearpart_t) 1 << (k-((int)k/64)*64));
                }
            }
            // Quadratic-term mask: bit order is reversed (MSB-first file).
            for (int k = 0; k < para_num; k++) {
                fscanf(in2, "%c", &c2);
                while (c2 != '0' && c2 != '1') {
                    fscanf(in2, "%c", &c2);
                }
                if (c2 == '1') {
                    // parses as << ((para_num - 1) - k)
                    square_mat[i][j] ^= ((squarepart_t) 1 << (para_num - 1) - k);
                }
            }
        }
    }
    // Nonlinear verification polynomials: 192 columns = parameters (reversed),
    // unknowns, constant.
    for (int i = 0; i < ori_num; i++) {
        for (int j = 0; j < para_num + unknown_num + 1; j++) {
            polys[i][j][0] = 0;
            polys[i][j][1] = 0;
            polys[i][j][2] = 0;
            for (int k = 0; k < 192; k++) {
                fscanf(in3, "%c", &c3);
                while (c3 != '0' && c3 != '1') {
                    fscanf(in3, "%c", &c3);
                }
                if (k < para_num && c3 == '1') {
                    polys[i][j][0] ^= ((oripoly_t) 1 << (para_num - k -1));
                } else if (k >= para_num && k<para_num+unknown_num && c3 == '1') {
                    polys[i][j][1] ^= ((oripoly_t) 1 << (k - para_num));
                }else if(c3 == '1'){
                    polys[i][j][2] ^= ((oripoly_t) 1);
                }
            }
        }
    }
    // Base linear matrix (before any parameter contribution).
    for (int i = 0; i < poly_num; i++) {
        working_mat[i][0] = 0;
        working_mat[i][1] = 0;
        for (int j = 0; j < 128; j++) {
            fscanf(in4, "%c", &c4);
            while (c4 != '0' && c4 != '1') {
                fscanf(in4, "%c", &c4);
            }
            if (c4 == '1') {
                working_mat[i][(int)j/64] ^= ((linearpart_t) 1 << (j - ((int)j/64)*64));
            }
        }
        working_mat_file[i][0] = working_mat[i][0];
        working_mat_file[i][1] = working_mat[i][1];
    }
    fclose(in1);
    fclose(in2);
    fclose(in3);
    fclose(in4);
    printf("finish reading file!\n");
    // Flatten the low-ENUM_NUM update matrices for the device symbols.
    linearpart_t linear_mat_enum[ENUM_NUM * POLY_NUM * 2];
    squarepart_t square_mat_enum[ENUM_NUM * POLY_NUM];
    oripoly_t polys_mat_enum[ori_num * (para_num + unknown_num + 1) * 3 ];
    for (int i = 0; i < ENUM_NUM; i++) {
        for (int j = 0; j < POLY_NUM; j++) {
            for (int k = 0; k < 2; k++) {
                linear_mat_enum[i * POLY_NUM * 2 + j * 2 + k] =
                        linear_mat[i][j][k];
            }
        }
    }
    for (int i = 0; i < ENUM_NUM; i++) {
        for (int j = 0; j < POLY_NUM; j++) {
            square_mat_enum[i * POLY_NUM + j] = square_mat[i][j];
        }
    }
    for(int i = 0; i < ori_num; i++){
        for(int j = 0; j < para_num + unknown_num + 1; j++){
            for(int k = 0; k < 3; k++){
                polys_mat_enum[i * (para_num + unknown_num + 1) * 3 + j * 3 + k] = polys[i][j][k];
            }
        }
    }
    cudaMemcpyToSymbol(d_linear_mat, linear_mat_enum,
            2 * ENUM_NUM * POLY_NUM * sizeof(linearpart_t));
    cudaMemcpyToSymbol(d_square_mat, square_mat_enum,
            ENUM_NUM * POLY_NUM * sizeof(squarepart_t));
    cudaMemcpyToSymbol(d_polys_mat, polys_mat_enum,
            3 * NONLINEAR_NUM * (PARA_NUM + UNKNOWN_NUM + 1) * sizeof(oripoly_t));
    printf("finish copying device memory!\n");
    cudaError_t err = cudaSuccess;
    int thidx = BLOCK_NUM * THREAD_NUM; // total number of GPU threads
    value_t *d_val = NULL;
    err = cudaMalloc((void **) &d_val, thidx * sizeof(value_t));
    if (err != cudaSuccess) {
        printf("Failed to allocate device value (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    char *d_bound = NULL;
    err = cudaMalloc((void **) &d_bound, thidx * sizeof(char));
    if (err != cudaSuccess) {
        printf("Failed to allocate device value (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    value_t *d_sol_total = NULL;
    err = cudaMalloc((void **) &d_sol_total, thidx * sizeof(value_t));
    if (err != cudaSuccess) {
        printf("Failed to allocate device value (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    linearpart_t *d_working_mat_copy = NULL;
    err = cudaMalloc((void **) &d_working_mat_copy,
            thidx * poly_num * 2 * sizeof(linearpart_t));
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to allocate device working_mat_copy (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    squarepart_t *d_const_mat = NULL;
    err = cudaMalloc((void **) &d_const_mat,
            thidx * poly_num * sizeof(squarepart_t));
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to allocate devices const_mat (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // NOTE: a VLA of thidx * 10 value_t (~640 KB with the current macros);
    // lives on the stack.
    value_t h_result[thidx * RESULT_MAX_NUM * 2];
    for(int i = 0; i < thidx * RESULT_MAX_NUM * 2 ; i ++){
        h_result[i] = 0;
    }
    printf("finish allocate device memory!\n");
    // deal with the case with all low (enumerated) parameters 0 on the CPU:
    // seeds the bound and the per-thread starting matrices.
    int s_total_p0 = 0;
    value_t *val_arr = (value_t*)calloc(thidx, sizeof(value_t));
    linearpart_t *working_mat_copy_arr = (linearpart_t*)calloc(thidx * POLY_NUM * 2, sizeof(linearpart_t));
    squarepart_t *const_mat_arr = (squarepart_t*)calloc(thidx * POLY_NUM, sizeof(squarepart_t));
    // fix: element size was sizeof(short) for a char array.
    char *bound_arr = (char*)calloc(thidx, sizeof(char));
    for (int thi = 0; thi < thidx; thi++) {
        value_t sol_num = 0;
        value_t solutions[SOL_MAX_NUM];
        value_t val = (set_val + (value_t) thi) << ENUM_NUM;
        val_arr[thi] = val;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            working_mat[pi][0] = working_mat_file[pi][0];
            working_mat[pi][1] = working_mat_file[pi][1];
            const_mat[pi] = 0;
        }
        // Apply the high (non-enumerated) parameter bits of this thread.
        for (int pos = enum_num; pos < para_num; pos++) {
            if (val & ((value_t) 1 << pos)) {
                for (int pi = 0; pi < poly_num; pi++) {
                    working_mat[pi][0] ^= linear_mat[pos][pi][0];
                    working_mat[pi][1] ^= linear_mat[pos][pi][1];
                }
                for (int pi = 0; pi < poly_num; pi++) {
                    const_mat[pi] ^= square_mat[pos][pi];
                }
            }
        }
        for (int i = 0; i < poly_num; i++) {
            working_mat_copy[i][0] = working_mat[i][0];
            working_mat_copy[i][1] = working_mat[i][1];
            working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i] = working_mat_copy[i][0];
            working_mat_copy_arr[thi * POLY_NUM * 2 + 2 * i + 1] = working_mat_copy[i][1];
            const_mat_arr[thi * POLY_NUM + i] = const_mat[i];
        }
        // Fold the quadratic contribution into the constant bit (parity).
        for (int pi = 0; pi < poly_num; pi++) {
            value_t w = const_mat[pi] & val;
            w = (w) ^ (w >> 32);
            w = (w) ^ (w >> 16);
            w = (w) ^ (w >> 8);
            w = (w) ^ (w >> 4);
            w = (w) ^ (w >> 2);
            w = (w) ^ (w >> 1);
            if (w & (value_t) 1) {
                working_mat[pi][0] ^= (linearpart_t) 1;
            }
        }
        sol_num = gauss_host(working_mat, POLY_NUM, UNKNOWN_NUM, solutions);
        s_total_p0 += sol_num;
        // verify the solutions against the nonlinear polynomials
        for (int s = 0; s < sol_num; s++) {
            int one_num = 0;
            int zero_num = 0;
            int op;
            for (op = 0; op < ori_num; op++) {
                cstpoly[0] = polys[op][para_num + unknown_num][0];
                cstpoly[1] = polys[op][para_num + unknown_num][1];
                cstpoly[2] = polys[op][para_num + unknown_num][2];
                // for parameters.
                for (int pa = 0; pa < para_num; pa++) {
                    if (val & ((value_t) 1 << pa)) {
                        cstpoly[0] ^= polys[op][para_num - pa - 1][0];
                        cstpoly[1] ^= polys[op][para_num - pa - 1][1];
                        cstpoly[2] ^= polys[op][para_num - pa - 1][2];
                    }
                }
                for (int un = 0; un < unknown_num; un++) {
                    if (solutions[s] & ((value_t) 1 << un)) {
                        cstpoly[0] ^= polys[op][para_num + un][0];
                        cstpoly[1] ^= polys[op][para_num + un][1];
                        cstpoly[2] ^= polys[op][para_num + un][2];
                    }
                }
                // NOTE(review): this XOR cancels the constant term loaded
                // above (same index as cstpoly[2]'s initializer); the GPU
                // verifier in solveLinear has no such line -- confirm which
                // behavior is intended. Preserved as-is.
                cstpoly[2] ^= polys[op][unknown_num + para_num][2];
                // evaluate
                cstpoly[0] = cstpoly[0] & val;
                cstpoly[1] = cstpoly[1] & solutions[s];
                cstpoly[2] = cstpoly[2] & ((oripoly_t) 1);
                value_t w = cstpoly[0] ^ cstpoly[1] ^ cstpoly[2];
                w = (w) ^ (w >> 32);
                w = (w) ^ (w >> 16);
                w = (w) ^ (w >> 8);
                w = (w) ^ (w >> 4);
                w = (w) ^ (w >> 2);
                w = (w) ^ (w >> 1);
                if (w & (value_t) 1) {
                    zero_num++;
                    if (zero_num > ori_num - bound) {
                        break;
                    }
                } else {
                    one_num++;
                }
            }
            if (zero_num + one_num == ori_num) {
                printf( "val:%lu,sol:%lu,count:%d\n", val, solutions[s],one_num);
                h_result[thi * RESULT_MAX_NUM * 2] = val;
                h_result[thi * RESULT_MAX_NUM * 2 + 1] = solutions[s];
            }
            if (one_num > bound) {
                bound = one_num;
                bound_arr[thi] = bound;
            }
        }
    }
    // Normalize: every thread starts the GPU phase with the global bound.
    for(int i = 0; i < thidx;i++){
        if(bound_arr[i] < bound){
            bound_arr[i] = bound;
            h_result[i * RESULT_MAX_NUM * 2] = 0;
            h_result[i * RESULT_MAX_NUM * 2 + 1] = 0;
        }
    }
    printf("finish cpu computing! the bound is %d now...\n", bound);
    // begin device part: copy values from host to device
    err = cudaMemcpy(d_val, val_arr, thidx * sizeof(value_t),
            cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("Failed to copy value from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_bound, bound_arr, thidx * sizeof(char),
            cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("Failed to copy bound from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    value_t *d_result = NULL;
    err = cudaMalloc((void **) &d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t));
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to allocate devices result (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_result, h_result,
            thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy result from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_working_mat_copy, working_mat_copy_arr,
            thidx * 2 * poly_num * sizeof(linearpart_t), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy working_mat_copy from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_const_mat, const_mat_arr,
            thidx * poly_num * sizeof(squarepart_t), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy const_mat from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("enum num : %d\nblock num : %d\nthread num : %d\n", ENUM_NUM,
            BLOCK_NUM, THREAD_NUM);
    // solve the linear systems on the GPU, timing with events
    cudaEvent_t start1;
    cudaEventCreate(&start1);
    cudaEvent_t stop1;
    cudaEventCreate(&stop1);
    cudaEventRecord(start1, NULL);
    printf("begin solve linear system!\n");
    solveLinear<<<BLOCK_NUM, THREAD_NUM>>>(d_working_mat_copy, d_const_mat,
            d_val, d_bound, d_sol_total,d_result);
    cudaEventRecord(stop1, NULL);
    cudaEventSynchronize(stop1);
    float msecTotal1 = 0.0f;
    cudaEventElapsedTime(&msecTotal1, start1, stop1);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch solveLinear kernel (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    value_t h_sol_total[thidx];
    err = cudaMemcpy(h_sol_total, d_sol_total, thidx * sizeof(value_t),
            cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy total solution numbers from device to host (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(h_result, d_result, thidx * RESULT_MAX_NUM * 2 * sizeof(value_t),
            cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy result from device to host (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(bound_arr, d_bound, thidx * sizeof(char),
            cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr,
                "Failed to copy bound from device to host (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // sort the results by bound, ascending (O(n^2), n = 8192 -- acceptable
    // next to the kernel runtime)
    for(int i = 0; i < thidx; i++){
        for(int j = i + 1; j < thidx; j++){
            if(bound_arr[i] > bound_arr[j]){
                char temp = bound_arr[i];
                bound_arr[i] = bound_arr[j];
                bound_arr[j] = temp;
                for(int ri = 0;ri < 2 * RESULT_MAX_NUM; ri ++){
                    value_t temp = h_result[i * 2 * RESULT_MAX_NUM + ri];
                    h_result[i * 2 * RESULT_MAX_NUM + ri] = h_result[j * 2 * RESULT_MAX_NUM + ri];
                    h_result[j * 2 * RESULT_MAX_NUM + ri] = temp;
                }
            }
        }
    }
    // write the best-bound threads' results (stop once the bound drops)
    printf("bound:%d\n",bound_arr[thidx-1]);
    FILE *out = fopen("result.txt","a+");
    for(int i = thidx - 1;i >= 0;i--){
        fprintf(out,"bound:%d\n",bound_arr[i]);
        for(int j = 0; j < RESULT_MAX_NUM; j++){
            fprintf(out,"%d. val:%lu, sol:%lu\n",j+1, h_result[i * 2 * RESULT_MAX_NUM + j * 2],h_result[i*2 * RESULT_MAX_NUM + j * 2 + 1]);
        }
        // fix: when i reached 0 the comparison read bound_arr[-1].
        if(i == 0 || bound_arr[i] > bound_arr[i-1]){
            break;
        }
    }
    fclose(out); // fix: result.txt was never closed, risking lost output
    printf("time:%.3lf ms\n---------------------------------------\n", msecTotal1);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    cudaFree(d_working_mat_copy);
    cudaFree(d_const_mat);
    cudaFree(d_val);
    cudaFree(d_bound);
    cudaFree(d_sol_total);
    cudaFree(d_result);
    // fix: host-side callocs were leaked
    free(val_arr);
    free(working_mat_copy_arr);
    free(const_mat_arr);
    free(bound_arr);
    return 0;
}
|
19,191 | #include<stdlib.h>
#include<stdio.h>
#include<math.h>
#include<cuda.h>
// Kernel: b[i] = sin(a[i]), one thread per element.
// The caller launches exactly one thread per array element, so no bounds
// guard is applied here.
__global__ void add(float *a , float *b)
{
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	float x = a[tid];
	b[tid] = sinf(x);
}
// Reads N floats from stdin, computes sin of each on the GPU, prints results.
int main(void)
{
	float *a , *b ;      // host input / output buffers
	float *d_a , *d_b ;  // device mirrors
	printf("Enter the value of N \n");
	int n;
	int i;
	// fix: scanf result and range were unchecked; n > 1024 would make the
	// <<<1, n>>> launch fail silently and print garbage.
	if (scanf("%d", &n) != 1 || n <= 0 || n > 1024)
	{
		printf("N must be between 1 and 1024\n");
		return 1;
	}
	int size = sizeof(float) * n;
	a = (float*)malloc(size);
	b = (float*)malloc(size);
	printf("Enter the values for 1st Array \n");
	for( i = 0;i<n;i++)
		scanf("%f",&a[i]);
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&d_b,size);
	cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
	// fix: the old host-to-device copy of the uninitialized b[] was removed;
	// the kernel overwrites every element of d_b anyway.
	add<<<1,n>>>(d_a,d_b);
	// Blocking copy: also synchronizes with the kernel.
	cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost);
	printf("Result \n");
	for( i = 0;i<n;i++)
		printf("%f \t",b[i]);
	printf("\n");
	cudaFree(d_a);
	cudaFree(d_b);
	free(a); // fix: host buffers were leaked
	free(b);
	return 0;
}
|
19,192 | //Referred Dr.Swenson's Sample code and Nvidia PDF for some code syntaxes and Excerpts. File read logic reference taken from online sources
//like geeks for geeks and cplusplus.com.
/*
Akshaya Nagarajan
ECE 6122 P2
GTID: 903319262
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <vector>
#include <string>
#include <fstream>
#include <sstream>
#define T_P_B 1024 //Threads per block for cuda kernel
//struct to store values from conf file (filled by init_readconf)
struct init_values {
	std::string dimension;                 // "2D" or "3D" (first non-comment line)
	float k;                               // diffusion coefficient
	int timesteps, width, height, depth;   // depth is only set for 3D configs
	float default_temp;                    // initial temperature of every cell
	// Heat-source geometry entries; the temperature column is stripped out
	// by init_readconf and kept separately in fixed_temp. NOTE(review):
	// declared int, so fractional coordinates in the file are truncated.
	std::vector<int> heatsource;
	std::vector<float> fixed_temp;         // one temperature per heat source
};
struct init_values init; // global config instance read by the solver
//Logic to read conf file.
// Populates the global `init` struct from the file at path `conf`.
// Layout (non-comment, non-empty lines): dimension, k, timesteps,
// grid size (comma-separated), default temperature, then one
// comma-separated heat-source line per source (2D: x,y,w,h,temp;
// 3D: 7 values ending in temp). Lines starting with '#' are comments.
void init_readconf(std::string conf)
{
	std::ifstream conf_file(conf.c_str());
	std::string line;
	std::vector<std::string> conf_vector;
	// Slurp the whole file line by line.
	while (std::getline(conf_file, line)) {
		conf_vector.push_back(line);
	}
	std::vector<std::string> temp;   // meaningful lines only
	std::vector<int> temp2;          // grid-size integers
	std::vector<float> temp3;        // flattened heat-source values
	// Drop empty lines and '#'-comment lines.
	for (int i = 0; i < conf_vector.size(); ++i)
	{
		if ((conf_vector[i].empty() == 0) && (conf_vector[i].find("#") != 0))
		{
			temp.push_back(conf_vector[i]);
		}
	}
	//initialize values non comma separated
	std::stringstream dim0(temp[0]);
	dim0 >> init.dimension;
	std::stringstream dim1(temp[1]);
	dim1 >> init.k;
	std::stringstream dim2(temp[2]);
	dim2 >> init.timesteps;
	std::stringstream dim3(temp[4]);
	dim3 >> init.default_temp;
	//initialize values comma separated
	if (init.dimension == "2D")
	{
		std::stringstream dim4;
		dim4 << temp[3];
		int a;
		//First get the total grid size (skipping commas)
		while(dim4 >> a)
		{
			if (dim4.peek() == ',')
			{
				dim4.ignore();
			}
			temp2.push_back(a);
		}
		init.width = temp2[0];
		init.height = temp2[1];
		for (int i = 5; i < temp.size(); ++i)
		{
			std::stringstream dim5;
			dim5 << temp[i];
			float b;
			//Get the heat source values (all sources flattened into temp3)
			while(dim5 >> b)
			{
				if (dim5.peek() == ',')
				{
					dim5.ignore();
				}
				temp3.push_back(b);
			}
		}
		//Put heat source values into vectors.
		// NOTE(review): heatsource is vector<int>, so the float values are
		// truncated here -- presumably coordinates are integral; confirm.
		for (int i = 0; i < temp3.size(); ++i)
		{
			init.heatsource.push_back(temp3[i]);
		}
		// Every 5th value (index 4, 9, ...) is a source temperature.
		for (int i = 4; i < temp3.size(); i= i+5)
		{
			init.fixed_temp.push_back(temp3[i]);
		}
		// Remove the temperature column from heatsource in place; the
		// stride of 4 accounts for the vector shrinking by one per erase.
		for (int i = 4; i < init.heatsource.size(); i= i+4)
		{
			init.heatsource.erase(init.heatsource.begin() + i);
		}
	}
	else {
		std::stringstream dim4;
		dim4 << temp[3];
		int a;
		//First get the total grid size
		while(dim4 >> a)
		{
			if (dim4.peek() == ',')
			{
				dim4.ignore();
			}
			temp2.push_back(a);
		}
		init.width = temp2[0];
		init.height = temp2[1];
		init.depth = temp2[2];
		for (int i = 5; i < temp.size(); ++i)
		{
			std::stringstream dim5;
			dim5 << temp[i];
			float b;
			//Get the heat source values
			while(dim5 >> b)
			{
				if (dim5.peek() == ',')
				{
					dim5.ignore();
				}
				temp3.push_back(b);
			}
		}
		//Put heat source values into vectors (see 2D notes above on
		// float->int truncation).
		for (int i = 0; i < temp3.size(); ++i)
		{
			init.heatsource.push_back(temp3[i]);
		}
		// Every 7th value is a source temperature in the 3D layout.
		for (int i = 6; i < temp3.size(); i= i+7)
		{
			init.fixed_temp.push_back(temp3[i]);
		}
		// Strip the temperature column; stride 6 accounts for shrinkage.
		for (int i = 6; i < init.heatsource.size(); i= i+6)
		{
			init.heatsource.erase(init.heatsource.begin() + i);
		}
	}
}
//kernel function for 2D
// One explicit-Euler heat-diffusion step on a width x height grid stored
// row-major in arraymain; the updated grid is written to arraytemp.
// arraybool[idx] is a 0/1 mask (0 freezes fixed-temperature cells).
// N = width*height; launched 1-D with one thread per cell.
// In this layout "top" is the next row (+width) and "bottom" the previous.
__global__ void twodfunc(float *arraymain, float *arraytemp, float *arraybool, float k, int width, int height, int N) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x; //Get thread Ids
	if (idx < N)
	{
		// Fix: the four neighbour loads used to run unconditionally, which
		// read out of bounds for edge threads (e.g. arraymain[-1] at idx 0,
		// arraymain[idx + width] past the end for the last row). Guard each
		// load; every branch below only uses neighbours that are in bounds,
		// so the guarded 0.0f placeholders are never consumed.
		float top    = (idx + width < N) ? arraymain[idx + width] : 0.0f;
		float bottom = (idx - width >= 0) ? arraymain[idx - width] : 0.0f;
		float left   = (idx - 1 >= 0) ? arraymain[idx - 1] : 0.0f;
		float right  = (idx + 1 < N) ? arraymain[idx + 1] : 0.0f;
		//Heat Diffusion formula for 8 corner and 1 general case in 2D.
		//Missing neighbours are substituted with the cell's own value.
		//for 1st element
		if (idx == 0)
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + top + arraymain[idx] + right - 4*arraymain[idx]));
		}
		//for last element
		else if (idx == (width*height -1))
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + bottom + arraymain[idx] + left - 4*arraymain[idx]));
		}
		//for leftcorner top
		else if ((idx + width == width*height) && (idx%width == 0))
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + bottom + arraymain[idx] + right - 4*arraymain[idx]));
		}
		//for rightcorner bottom
		else if ((idx - width < 0) && (idx%width == (width-1)))
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + top + arraymain[idx] + left - 4*arraymain[idx]));
		}
		//for top
		else if (idx + width > width*height)
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + bottom + left + right - 4*arraymain[idx]));
		}
		//for bottom
		else if (idx - width < 0)
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + top + left + right - 4*arraymain[idx]));
		}
		//for left
		else if (idx%width == 0)
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + top + bottom + right - 4*arraymain[idx]));
		}
		//for right
		else if (idx%width == (width-1))
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(arraymain[idx] + top + left + bottom - 4*arraymain[idx]));
		}
		//general cases
		else
		{
			arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(top + bottom + left + right - 4*arraymain[idx]));
		}
	}
}
//kernel function for 3D
// One Jacobi step of 3D heat diffusion: reads arraymain, writes arraytemp.
// arraybool[idx] is 1 for cells that may change and 0 for fixed-temperature
// heat-source cells, so multiplying by it freezes the sources.
// NOTE(review): the index math assumes layout idx = y*(width*depth) + z*width + x,
// matching the host-side fill loops (p*width*depth + j + k*width) -- confirm
// before changing the host layout.
__global__ void threedfunc(float *arraymain, float *arraytemp, float *arraybool, float k, int width, int height, int depth, int N) {
int idx = threadIdx.x + blockIdx.x * blockDim.x; //Get thread Ids
if (idx < N)
{
//Setting by default to its own values for corner cases
// (a boundary cell substitutes its own value for a missing neighbour, which
// cancels that neighbour's term in the 6-point stencil below)
float top = arraymain[idx];
float bottom = arraymain[idx];
float left = arraymain[idx];
float right = arraymain[idx];
float front = arraymain[idx];
float back = arraymain[idx];
//index computation for non corner cases (in order to avoid many loops covering the individual cases)
int index;
//for top (next plane: +width*depth elements)
index = idx + width*depth;
if (index < N)
{
top = arraymain[index];
}
//for bottom (previous plane)
index = idx - width*depth;
if (index >= 0)
{
bottom = arraymain[index];
}
//for front: row position within the current plane
index = idx%(width*depth);
index = index/width;
if (index != 0)
{
front = arraymain[idx - width];
}
//for back (index still holds the row position computed above)
if (index != (depth-1))
{
back = arraymain[idx + width];
}
//for left: column position within the row
index = idx%width;
if (index != 0)
{
left = arraymain[idx-1];
}
//for right
if (index != width-1)
{
right = arraymain[idx+1];
}
//general formula for heat diffusion 3D with calculated indexes
arraytemp[idx] = arraymain[idx] + arraybool[idx]*(k*(front + back + top + bottom + left + right - 6*arraymain[idx]));
}
}
//main
// Heat-diffusion driver: reads a config file (path in argv[1]) into the global
// `init` struct, runs `init.timesteps` Jacobi iterations on the GPU with
// ping-ponged buffers, and writes the final grid to heatOutput.csv.
int main(int argc, char* argv[]) {
	// BUG FIX: the original dereferenced argv[1] without checking argc.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <config-file>\n", argv[0]);
		return 1;
	}
	init_readconf(argv[1]);
	//For 2D
	if (init.dimension == "2D")
	{
		char *filename = (char *)"heatOutput.csv";
		FILE *fp = fopen(filename, "w");
		if (fp == NULL) {
			fprintf(stderr, "cannot open %s for writing\n", filename);
			return 1;
		}
		int N = init.width*init.height;
		// BUG FIX: the byte count was stored in a float, which loses precision
		// for large grids before the implicit conversion in cudaMalloc/cudaMemcpy.
		size_t size = (size_t)N * sizeof(float);
		// BUG FIX: heap allocation instead of stack VLAs -- width*height can
		// easily exceed the default stack size.
		float *a = new float[N];   // temperature grid (current step)
		float *b = new float[N];   // temperature grid (next step)
		float *c = new float[N];   // 1 = cell may change, 0 = fixed heat source
		float *d_a, *d_b, *d_c;
		for (int i = 0; i < N; ++i)
		{
			a[i] = init.default_temp;
			b[i] = 0;
			c[i] = 1;
		}
		//logic for conf file and array integration 2D
		// heatsource entries come in groups of 4: x, y, width, height
		int index = 0;
		for (int i = 0; i < init.heatsource.size(); i=i+4)
		{
			for (int j = init.heatsource[i+1]; j < init.heatsource[i+1]+init.heatsource[i+3]; j++)
			{
				for (int k = init.heatsource[i]; k < init.heatsource[i]+init.heatsource[i+2]; k++)
				{
					a[j*init.width + k] = init.fixed_temp[index];
					c[j*init.width + k] = 0;
				}
			}
			index++;
		}
		//initialize array for device
		cudaMalloc((void **)&d_a, size);
		cudaMalloc((void **)&d_b, size);
		cudaMalloc((void **)&d_c, size);
		//copy to device from host
		cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
		cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
		cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
		float *swap;
		//Looping timesteps number of times to get the final grid
		// (ping-pong d_a/d_b so each step reads the previous step's output)
		for (int i = 0; i < init.timesteps; i++)
		{
			twodfunc<<<(N + T_P_B-1) / T_P_B, T_P_B>>>(d_a, d_b, d_c, init.k, init.width, init.height, N); //Call kernel function
			cudaDeviceSynchronize();
			swap = d_a;
			d_a = d_b;
			d_b = swap;
		}
		//copy to host from device
		cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
		//Print the Final Grid to CSV file (newline at each row end)
		for(int i = 0; i < N; i++) {
			if(i!=N-1 && i%init.width == init.width-1 && i != 0)
				fprintf(fp, "%f\n", a[i]);
			else if ((i==0) || (i!=N-1 && i%init.width !=init.width-1))
				fprintf(fp, "%f, ", a[i]);
			else fprintf(fp, "%f\n", a[i]);
		}
		cudaFree(d_a);
		cudaFree(d_b);
		cudaFree(d_c);
		delete[] a;
		delete[] b;
		delete[] c;
		fclose(fp);
	}
	//For 3D
	else
	{
		char *filename = (char *)"heatOutput.csv";
		FILE *fp = fopen(filename, "w");
		if (fp == NULL) {
			fprintf(stderr, "cannot open %s for writing\n", filename);
			return 1;
		}
		int N = init.width*init.height*init.depth;
		size_t size = (size_t)N * sizeof(float);
		float *a = new float[N];
		float *b = new float[N];
		float *c = new float[N];
		float *d_a, *d_b, *d_c;
		for (int i = 0; i < N; ++i)
		{
			a[i] = init.default_temp;
			b[i] = 0;
			c[i] = 1;
		}
		//logic for conf file and array integration 3D
		// heatsource entries come in groups of 6: x, y, z, w, h, d
		int index = 0;
		for (int i = 0; i < init.heatsource.size(); i=i+6)
		{
			for (int p = init.heatsource[i+1]; p < init.heatsource[i+1]+init.heatsource[i+4]; p++)
			{
				for (int k = init.heatsource[i+2]; k < init.heatsource[i+2]+init.heatsource[i+5]; k++)
				{
					for (int j = init.heatsource[i]; j < init.heatsource[i]+init.heatsource[i+3]; j++)
					{
						a[p*init.width*init.depth + j + k*init.width] = init.fixed_temp[index];
						c[p*init.width*init.depth + j + k*init.width] = 0;
					}
				}
			}
			index++;
		}
		//initialize array for device
		cudaMalloc((void **)&d_a, size);
		cudaMalloc((void **)&d_b, size);
		cudaMalloc((void **)&d_c, size);
		//copy to device from host
		cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
		cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
		cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
		float *swap;
		//Looping timesteps number of times to get the final grid
		for (int i = 0; i < init.timesteps; i++)
		{
			threedfunc<<<(N + T_P_B-1) / T_P_B, T_P_B>>>(d_a, d_b, d_c, init.k, init.width, init.height, init.depth, N); //Call kernel function
			cudaDeviceSynchronize();
			swap = d_a;
			d_a = d_b;
			d_b = swap;
		}
		//copy to host from device
		cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
		//Print the Final Grid to CSV file (blank line between planes)
		for (int k = 0; k < init.depth; k++)
		{
			for (int p = 0; p < init.height; p++)
			{
				for (int j = 0; j < init.width; j++)
				{
					if (j == init.width-1)
						fprintf(fp, "%f\n", a[p*init.width*init.depth + j + k*init.width]);
					else fprintf(fp, "%f, ", a[p*init.width*init.depth + j + k*init.width]);
				}
			}
			fprintf(fp, "\n");
		}
		cudaFree(d_a);
		cudaFree(d_b);
		cudaFree(d_c);
		delete[] a;
		delete[] b;
		delete[] c;
		fclose(fp);
	}
	return 0;
}
|
19,193 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <iostream>
using namespace std;
#define arraySize 4
// Naive dense matrix multiply C = A * B, one output element per thread.
// A is (rows x width), B is (width x b_width), C holds data_len = rows*b_width
// elements in row-major order.
__global__ void MatMul(float *C, float *A, float *B, int width, int b_width, int data_len)
{
	unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
	// BUG FIX: the guard was `id > data_len`, which skipped every in-range
	// thread and let out-of-range threads write past the end of C.
	if (id < data_len) {
		unsigned int w = id / b_width;   // output row
		unsigned int h = id % b_width;   // output column
		float sum = 0.0F;
		for (int j = 0; j < width; j++) {
			sum += A[w * width + j] * B[j * b_width + h];
		}
		*(C + id) = sum;
	}
}
// Elementwise rectified linear unit: C[i] = max(A[i], 0), one element per thread.
__global__ void ReLU(float *C, float *A, int data_len)
{
	unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
	if (id < data_len) {
		float v = A[id];
		C[id] = (v > 0) ? v : 0;
	}
}
// Device-side maximum of two floats.
__device__ float maxx(float a, float b) {
	return (a > b) ? a : b;
}
// Max pooling with window and stride both equal to `size`, one output element
// per thread. The linear thread id is decomposed as
//   id = ((bs * out_width + ow) * out_height + oh) * in_channels + ch
// NOTE(review): the input index arithmetic implies a [batch][w][h][channel]
// layout -- confirm against the producer of `inputs`.
// NOTE(review): there is no `id < total` guard; the launch must supply exactly
// bs*out_width*out_height*in_channels threads or this reads/writes OOB.
__global__ void MaxPooling(float *data, float *inputs, int size, int out_height, int out_width, int in_width, int in_height, int in_channels)
{
	int id = threadIdx.x + blockDim.x * blockIdx.x;
	// Decompose the flat id into (batch, out-x, out-y, channel).
	int bs = id / (in_channels * out_height * out_width);
	int ow = (id - bs * in_channels * out_height * out_width) / (in_channels * out_height);
	int oh = (id - bs * in_channels * out_height * out_width - ow * in_channels * out_height)/ in_channels;
	int ch = id - bs * in_channels * out_height * out_width - ow * in_channels * out_height - oh * in_channels;
	// Seed with the window's first element, then scan the size x size window.
	float mmax = inputs[ch + ow*size*in_height*in_channels + oh*size*in_channels + bs*in_channels*in_width * in_height];
	for (int ws = 0; ws < size; ws++) {
		for (int wh = 0; wh < size; wh++) {
			mmax = maxx(mmax, inputs[ch + (ow*size + ws)*in_height*in_channels + (oh*size + wh)*in_channels
				+ bs*in_channels*in_width * in_height]);
		}
	}
	data[id] = mmax;
}
// Convolution with stride equal to kernel_size (non-overlapping windows), one
// output element per thread. Thread id decomposes as
//   id = ((bs * out_width + ow) * out_height + oh) * out_channels + co
// Weights are laid out [out_channel][in_channel][kw][kh].
// NOTE(review): no `id < total` guard -- the launch must supply exactly
// bs*out_width*out_height*out_channels threads; no bias term is applied.
__global__ void CONV(float *out_data, float *input,float *weight, int kernel_size,int out_width,int out_height,int out_channels,
	int in_width, int in_height, int in_channels)
{
	int id = threadIdx.x + blockDim.x * blockIdx.x;
	// Decompose the flat id into (batch, out-x, out-y, out-channel).
	int bs = id / (out_channels * out_height * out_width);
	int ow = (id - bs * out_channels * out_height * out_width) / (out_channels * out_height);
	int oh = (id - bs * out_channels * out_height * out_width - ow * out_channels * out_height) / out_channels;
	int co = id - bs * out_channels * out_height * out_width - ow * out_channels * out_height -oh * out_channels;
	float result = 0;
	// Accumulate over all input channels and the kernel window; the window
	// origin is (ow*kernel_size, oh*kernel_size), i.e. stride == kernel_size.
	for (int ci = 0; ci < in_channels; ci++) {
		for (int kw = 0; kw < kernel_size; kw++) {
			for (int kh = 0; kh < kernel_size; kh++) {
				result +=
					input[bs*in_height*in_channels*in_width + (ow * kernel_size + kw) * in_height*in_channels +
					(oh * kernel_size + kh)* in_channels + ci]
					* weight[co * in_channels*kernel_size*kernel_size + ci *kernel_size*kernel_size + kw * kernel_size + kh];
			}
		}
	}
	out_data[id] = result;
}
// Demo driver: runs the CONV kernel on a fixed 4x4 input with a 2x2 all-ones
// kernel (stride 2) and prints the 2x2 result.
int main()
{
	float A[arraySize][arraySize] = { { 1, 2, 3 , 4 } , { 1, 2, 3 , 4},{ 4, 5 ,6 ,7},{ 7, 8, 9, 10 } };
	float weight[arraySize / 2][arraySize / 2] = { {1, 1}, {1, 1} };
	float D[arraySize/2][arraySize/2] = { { 0,0 },{ 0,0 }};
	float *dev_A;
	float *dev_D;
	float *dev_weight;
	cudaMalloc(&dev_A, arraySize*arraySize * sizeof(float));
	cudaMalloc(&dev_D, arraySize*arraySize / 4 * sizeof(float));
	cudaMalloc(&dev_weight, arraySize*arraySize/4 * sizeof(float));
	cudaMemcpy(dev_A, A, arraySize*arraySize * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_weight, weight, arraySize*arraySize/4 * sizeof(float), cudaMemcpyHostToDevice);
	// One thread per output element: 2x2 output, 1 out-channel, batch 1.
	CONV<<<1, 4>>>(dev_D, dev_A, dev_weight, 2, 2, 2, 1, 4, 4, 1);
	cudaMemcpy(D, dev_D, arraySize*arraySize/4 * sizeof(float), cudaMemcpyDeviceToHost);
	for (int i = 0; i < arraySize/2; i++)
	{
		for (int j = 0; j < arraySize/2; j++)
		{
			printf("C[%d][%d] = %f \t", i, j, D[i][j]);
		}
		printf("\n");
	}
	cudaFree(dev_D);
	cudaFree(dev_A);
	// BUG FIX: dev_weight was never freed (device memory leak).
	cudaFree(dev_weight);
	return 0;
}
|
19,194 | #include<fstream>
#include<stdio.h>
#include<iostream>
// Read the entire stream pInfile into the buffer at pPointer and return the
// number of bytes read (0 on NULL stream). The caller must supply a buffer at
// least as large as the file.
// BUG FIX: the original reused the previous fread's byte count as the next
// request size and advanced the destination by whole ints, so a short read in
// the middle of the stream left gaps in the buffer and shrank every later
// request. This version keeps a byte cursor and reads fixed-size chunks.
long long int read_file_to_memmory(FILE *pInfile , int *pPointer)
{
	if(pInfile == NULL)
	{
		return 0;
	}
	char *mDst = (char *)pPointer;   // byte-wise cursor: short reads never leave gaps
	long long int mFileSize = 0;
	size_t mGot;
	while((mGot = fread(mDst + mFileSize, 1, 4096, pInfile)) > 0)
	{
		mFileSize += (long long int)mGot;
	}
	return mFileSize;
}
// Write pFileSize bytes from pPointer to pOutFile; returns the number of
// bytes actually written, or 0 when the stream is NULL.
long long int write_file_from_memmory(FILE *pOutFile , int *pPointer,long long int pFileSize)
{
	if(pOutFile == NULL)
	{
		return 0;
	}
	return (long long int)fwrite(pPointer, 1, pFileSize, pOutFile);
}
// XOR decryption: pEncryptedData[i] = pDataPointer[i] ^ pRandomData[i], one
// int per thread. pSize is the buffer length in BYTES.
__global__ void generate_decrypted(int *pDataPointer , int *pRandomData , int *pEncryptedData , long long int pSize)
{
	long long int index = blockIdx.x * blockDim.x + threadIdx.x;
	// BUG FIX: the guard was `index <= pSize/sizeof(int)`, which processed one
	// element PAST the end of the device buffers whenever pSize is a multiple
	// of sizeof(int). Comparing in bytes keeps a partial trailing word covered
	// (as before) without that extra out-of-bounds element.
	if( index * (long long int)sizeof(int) < pSize )
	{
		(*(pEncryptedData+index)) = (*(pDataPointer+ index))^(*(pRandomData+index));
	}
}
// Decryptor driver: XORs file "enc" with file "key" on the GPU and writes the
// plaintext to argv[1].
int main(int argc , char *argv[])
{
	// BUG FIX: argv[1] and the three fopen results were used unchecked.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <output-file>\n", argv[0]);
		return 1;
	}
	FILE *inFile = fopen("enc","rb");
	FILE *keyFile = fopen("key","rb");
	FILE *outFile = fopen(argv[1],"wb");
	if (inFile == NULL || keyFile == NULL || outFile == NULL) {
		fprintf(stderr, "failed to open enc/key/output file\n");
		return 1;
	}
	// 1 GiB staging buffer for the ciphertext (same capacity as the original).
	int *encryptedDataPointer = new int[268435456];
	long long int fileSize = read_file_to_memmory(inFile,encryptedDataPointer);
	int *keyDataPointer = new int[fileSize/sizeof(int) +100];
	int *decryptedDataPointer = new int[fileSize/sizeof(int) +100];
	// NOTE(review): fileSize is overwritten with the KEY file's size and used
	// for every transfer below -- assumes key and ciphertext have identical
	// length (one-time pad); confirm with the encryptor.
	fileSize = read_file_to_memmory(keyFile,keyDataPointer);
	int *d_encryptedDataPointer;
	cudaMalloc((void**)&d_encryptedDataPointer,fileSize);
	int *d_keyPointer;
	cudaMalloc((void**)&d_keyPointer,fileSize);
	int *d_decryptedDataPointer;
	cudaMalloc((void**)&d_decryptedDataPointer,fileSize);
	cudaMemcpy(d_encryptedDataPointer,encryptedDataPointer,fileSize,cudaMemcpyHostToDevice);
	cudaMemcpy(d_keyPointer , keyDataPointer,fileSize,cudaMemcpyHostToDevice);
	generate_decrypted<<<fileSize/64 +1 ,64>>>(d_encryptedDataPointer , d_keyPointer , d_decryptedDataPointer ,fileSize);
	cudaMemcpy(decryptedDataPointer,d_decryptedDataPointer,fileSize,cudaMemcpyDeviceToHost);
	fileSize = write_file_from_memmory(outFile,decryptedDataPointer,fileSize);
	// BUG FIX: the original leaked every host and device buffer.
	cudaFree(d_encryptedDataPointer);
	cudaFree(d_keyPointer);
	cudaFree(d_decryptedDataPointer);
	delete[] encryptedDataPointer;
	delete[] keyDataPointer;
	delete[] decryptedDataPointer;
	fclose(inFile);
	fclose(outFile);
	fclose(keyFile);
	return 0;
}
|
19,195 | #include "includes.h"
// Logistic-loss gradient: out_grad[i] = mat[i] - targets[i], except entries
// whose target is negative, which are masked to 0. Grid-stride loop, so any
// launch configuration covers all numEls elements.
__global__ void kLogisticGrad(float* mat, float* targets, float* out_grad, unsigned int numEls) {
	const unsigned int stride = blockDim.x * gridDim.x;
	for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEls; i += stride) {
		float t = targets[i];
		if (t < 0) {
			out_grad[i] = 0;
		} else {
			out_grad[i] = mat[i] - t;
		}
	}
}
19,196 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cuComplex.h>
#include <assert.h>
#include <cufft.h>
#include <cstdlib>
#include <cstring>
#define DEBUG
using namespace std;
const int N = 2e5 + 10;
int t, n;
__constant__ int T[1];
// Debug-build CUDA error check: prints the error string and asserts on
// failure; a pass-through in release builds. Returns the original status so
// calls can be chained.
inline cudaError_t checkCuda(cudaError_t result){
#if defined(DEBUG) || defined(_DEBUG)
	if (cudaSuccess != result) {
		fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
		assert(cudaSuccess == result);
	}
#endif
	return result;
}
// Pointwise complex product a[i] = a[i]*b[i] / T[0]. The division pre-applies
// the 1/T normalisation that cuFFT's unnormalised inverse transform needs.
// T[0] (the transform length) lives in __constant__ memory; grid-stride loop.
__global__ void vector_mul(cufftDoubleComplex *a, cufftDoubleComplex *b){
	const int stride = blockDim.x * gridDim.x;
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	while (i < T[0]) {
		cuDoubleComplex prod = cuCmul(a[i], b[i]);
		a[i] = make_cuDoubleComplex(cuCreal(prod) / T[0], cuCimag(prod) / T[0]);
		i += stride;
	}
}
// Round the real part of each transform point to the nearest integer digit
// value. T[0] (transform length) is in __constant__ memory; grid-stride loop.
__global__ void get_ans(int *ans, cufftDoubleComplex *a) {
	const int stride = blockDim.x * gridDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < T[0]; i += stride) {
		ans[i] = (int)(cuCreal(a[i]) + 0.5);
	}
}
// Big-integer multiplication via cuFFT: reads two n-digit decimal numbers from
// fft.in (least-significant digit packed first), multiplies them with a
// forward FFT / pointwise product / inverse FFT, carries the digits on the
// host, and writes the product to cufft.out.
int main(){
	cudaDeviceProp prop;
	checkCuda(cudaGetDeviceProperties(&prop, 0));
	//timing start
	cudaEvent_t start, stop;
	float elapsedTime = 0.0;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	//intialize
	FILE *in=fopen("fft.in","r"), *out=fopen("cufft.out","w");
	// BUG FIX: fopen/fscanf results were never checked.
	if (in == NULL || out == NULL) {
		fprintf(stderr, "cannot open fft.in / cufft.out\n");
		return 1;
	}
	if (fscanf(in, "%d", &n) != 1 || n <= 0) {
		fprintf(stderr, "bad digit count in fft.in\n");
		return 1;
	}
	// Transform length: smallest power of two >= 2n (product has <= 2n digits).
	t = 1; while (t < n + n) t <<= 1;
	//memory allocation
	int tt[1]; tt[0] = t;
	checkCuda(cudaMemcpyToSymbol(T, tt, sizeof(int)));
	int size = sizeof(cufftDoubleComplex)*t, size2 = sizeof(int)*t;
	// BUG FIX: allocate t+1 ints -- the carry loop below writes h_ans[t] on
	// its final iteration, which overran the original t-element buffer.
	int* h_ans = (int*)calloc(t + 1, sizeof(int));
	char* str = (char*)malloc(sizeof(char)*(n + 10));
	int* ans;
	checkCuda(cudaMalloc((void **)&ans, size2));
	cufftDoubleComplex *a, *b;
	cufftDoubleComplex *h_a = (cufftDoubleComplex *)calloc(t, sizeof(cufftDoubleComplex));
	cufftDoubleComplex *h_b = (cufftDoubleComplex *)calloc(t, sizeof(cufftDoubleComplex));
	checkCuda(cudaMalloc((void **)&a, size));
	checkCuda(cudaMalloc((void **)&b, size));
	//input and memcpy (digits reversed so index 0 is the units digit)
	fscanf(in, "%s", str); for (int i = 0; i < n; i++) h_a[i] = make_cuDoubleComplex((double)str[n - i - 1] - '0', 0.0);
	fscanf(in, "%s", str); for (int i = 0; i < n; i++) h_b[i] = make_cuDoubleComplex((double)str[n - i - 1] - '0', 0.0);
	checkCuda(cudaMemcpy(a, h_a, size, cudaMemcpyHostToDevice));
	checkCuda(cudaMemcpy(b, h_b, size, cudaMemcpyHostToDevice));
	//dft
	cufftHandle plan;
	if (cufftPlan1d(&plan, t, CUFFT_Z2Z, 1) != CUFFT_SUCCESS) {
		fprintf(stderr, "cufft plan create failed!");
		return 1;
	}
	if (cufftExecZ2Z(plan, a, a, CUFFT_FORWARD) != CUFFT_SUCCESS) {
		fprintf(stderr, "CUFFT error: a ExecZ2Z Forward failed");
		return 2;
	}
	if (cufftExecZ2Z(plan, b, b, CUFFT_FORWARD) != CUFFT_SUCCESS) {
		fprintf(stderr, "CUFFT error: b ExecZ2Z Forward failed");
		return 2;
	}
	//multiply (pointwise product, pre-normalised by 1/t in the kernel)
	vector_mul<<<t / prop.maxThreadsPerBlock + 1, prop.maxThreadsPerBlock>>>(a, b);
	checkCuda(cudaDeviceSynchronize());
	//idft
	if (cufftExecZ2Z(plan, a, a, CUFFT_INVERSE) != CUFFT_SUCCESS) {
		fprintf(stderr, "CUFFT error: a ExecZ2Z Inverse failed");
		return 4;
	}
	if (cufftDestroy(plan) != CUFFT_SUCCESS) {
		fprintf(stderr, "CUFFT error: fft plan destroy failed");
		return 5;
	}
	//round each coefficient to an integer on the GPU
	checkCuda(cudaMemcpy(h_a, a, size, cudaMemcpyDeviceToHost));
	get_ans<<<t / prop.maxThreadsPerBlock + 1, prop.maxThreadsPerBlock >>>(ans, a);
	checkCuda(cudaMemcpy(h_ans, ans, size2, cudaMemcpyDeviceToHost));
	//carry propagation (may write h_ans[t]; buffer holds t+1 ints)
	for (int i = 0; i<t; i++) h_ans[i + 1] += h_ans[i] / 10, h_ans[i] %= 10;
	//output
	// BUG FIX: stop at t==1 so a zero product still prints one "0" instead of
	// decrementing t below 1 and indexing before the buffer.
	while (t > 1 && !h_ans[t-1]) t--;
	for (int i = t-1; i >= 0; i--) fprintf(out, "%d", h_ans[i]);
	// BUG FIX: in/out streams were never closed.
	fclose(in);
	fclose(out);
	//delete
	checkCuda(cudaFree(a));
	checkCuda(cudaFree(b));
	checkCuda(cudaFree(ans));
	free(h_ans);
	free(h_a);
	free(h_b);
	free(str);
	//timing end
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	printf("%lf\t", elapsedTime);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	if (checkCuda(cudaDeviceReset()) != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 10;
	}
	return 0;
}
19,197 | #ifndef _AgentProperty_
#define _AgentProperty_
#include <limits>
#include <stdio.h>
#include <math.h>
#include "Vector3D.cu"
__device__ const float MAX_FORCE = 0.05f;
__device__ const float MAX_SPEED = 0.8f;
// Per-agent steering state (position/velocity/force) plus the basic "seek"
// steering behaviour used by the simulation kernels.
class AgentProperty
{
public:
	Vector3D position;   // current location
	Vector3D velocity;   // current velocity
	Vector3D force;      // accumulated steering force (written externally)
	__host__ __device__ AgentProperty(Vector3D position, Vector3D velocity)
	{
		this->position = position;
		this->velocity = velocity;
	}
	__host__ __device__ AgentProperty()
	{
	}
	// Return v scaled to unit length.
	// BUG FIX: the epsilon guard was commented out, so a zero-length input
	// divided by zero and produced inf/NaN components. A (near-)zero vector
	// now normalizes to the zero vector instead.
	__host__ __device__ inline Vector3D Vec3DNormalize(Vector3D v)
	{
		Vector3D vec;
		float vector_length = v.length();
		if (vector_length > 1e-8f)   // small literal instead of std::numeric_limits: safe in device code
		{
			vec.x = v.x / vector_length;
			vec.y = v.y / vector_length;
			vec.z = v.z / vector_length;
		}
		else
		{
			vec.x = 0.0f;
			vec.y = 0.0f;
			vec.z = 0.0f;
		}
		return vec;
	}
	// Write a screen-space quad (4 vertices, x mirrored) for the agent at
	// `vector` into vVerts; verticeLenth is the quad's edge length.
	__host__ __device__ void AgentMovement(float vVerts[], Vector3D vector, float verticeLenth)
	{
		vVerts[0] = -vector.x; vVerts[1] = vector.y; vVerts[2] = vector.z;
		vVerts[3] = -(vector.x-verticeLenth); vVerts[4] = vector.y; vVerts[5] = vector.z;
		vVerts[6] = -vector.x; vVerts[7] = (vector.y-verticeLenth); vVerts[8] = vector.z;
		vVerts[9] = -(vector.x-verticeLenth); vVerts[10] = (vector.y-verticeLenth); vVerts[11] = vector.z;
	}
	// Classic "seek": desired velocity points at the target at MAX_SPEED; the
	// returned steering force is the difference from the current velocity.
	__device__ Vector3D calculate_steering(Vector3D target)
	{
		Vector3D desired_velocity = Vec3DNormalize(target - this->position) * MAX_SPEED;
		return (desired_velocity - this->velocity);
	}
};
#endif |
19,198 | /*
For DIRECTED GRAPH
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
using namespace std;
/***all macros**/
#define MAX_NODE 100000000
#define DEBUG 1
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print file/line context for a failed CUDA call and (optionally) exit with
// the error code. Used via the gpuErrchk(...) macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code == cudaSuccess)
		return;
	fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
	if (abort)
		exit(code);
}
/**** device Code *******/
// __device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
// Each of the K threads pops the minimum-cost node from its own priority
// queue (a min-heap on Cx, stored in the PQ segment starting at
// id*ceil(N/K)), restores the heap property by sifting down, removes the node
// from the open list, and appends it to expandNodes for the expansion kernel.
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
// (move the last element to the root, then sift it down)
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
// only a left child exists
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// both children exist: swap with the cheaper violating child
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
// Relax all outgoing edges of every node in expandNodes: under a per-child
// spin lock, lower Cx[child] to Cx[node]-Hx[node]+W(edge)+Hx[child] (g+h),
// update parent, and flag newly-touched nodes in nVFlag for later insertion
// into the priority queues. Sets *flagfound when the destination is expanded.
// When flagDiff is set, the same relaxation runs over the diff (incremental)
// edge arrays.
__global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,volatile int* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
// publish Cx before the parent/lock release become visible
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
// BUG FIX: the original called __syncthreads() here, inside a spin loop
// reached only by threads with id < *expandNodes_size -- a barrier in
// divergent control flow is undefined behaviour and can deadlock.
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
// BUG FIX: the original read W[start] here, but `start` indexes the
// diff arrays -- the diff edge weight is diff_weight[start].
if( Cx[child] > (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
// (no __syncthreads here either -- see note above)
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
// After the expand phase lowered arbitrary Cx values, each of the K threads
// repairs its own queue segment: scan every node, and when a parent is more
// expensive than a child, sift the cheaper child up toward the segment root.
__global__ void keepHeapPQ(int* PQ_size,int* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
// node i has both children inside the segment
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
// pick the cheaper child, then bubble it up to restore the heap
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
// node i has only a left child
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
// Compact the 0/1 flag array nextFlag into a dense list of vertex ids in
// nextV; *nvSize receives the count. One thread per vertex; order in nextV is
// nondeterministic (atomicAdd slot reservation).
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
	int tid = blockIdx.x*blockDim.x+threadIdx.x;
	if (tid >= N)
		return;
	if (nextFlag[tid] != 1)
		return;
	nextV[atomicAdd(nvSize,1)] = tid;
}
//for K in parallel
// Distribute the nVsize pending vertices round-robin over the K priority
// queues: thread id takes vertices id, id+K, id+2K, ... and pushes each onto
// its own heap segment (sift-up on Cx), recording the owning queue in
// openList. Vertices already in some queue are skipped.
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
// append at the end of this thread's heap segment
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList (value = owning queue index)
openList[nextV[i]] = id;
if(PQS[id]>1){
// sift the new element up until the min-heap property holds
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
// Termination test: if any of the K queues still holds a root node cheaper
// than the current cost of the destination, clear *flagEnd so the A* loop
// continues. One thread per queue.
__global__ void checkMIN(int* PQ_size,int* flagEnd,int* Cx,int dest,int N,int K){
	int tid = blockIdx.x*blockDim.x+threadIdx.x;
	if (tid >= K || PQ_size[tid] <= 0)
		return;
	int front = tid * ( (N+K-1)/K );
	int root = PQ[front];
	//check if atleast one min, dont end the a*
	if (Cx[root] < Cx[dest])
		atomicAnd(flagEnd,0);
}
// Single-thread device-to-device read: copies Cx[dest] into *val so the host
// can fetch the destination cost with one small memcpy.
__global__ void getCx(int* Cx,int dest,int* val){
	if (blockIdx.x*blockDim.x+threadIdx.x == 0)
		*val = Cx[dest];
}
/**** main function ****/
// Host driver for K-parallel GPU A*: reads the CSR graph from graph_cg.txt and
// heuristics from Hx.txt, runs the extractMin/expand/repair/insert kernel loop
// until the destination is provably optimal, then prints cost and reverse path.
int main(){
//the K PQ (number of parallel priority queues)
int K ;
scanf("%d\n",&K);
int startNode,endNode;
scanf("%d %d",&startNode,&endNode);
FILE* fgraph = fopen("graph_cg.txt","r");
int N,E;
fscanf(fgraph,"%d %d\n",&N,&E);
// CSR graph arrays: offsets per node, edge targets, edge weights
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
int* H_hx = (int*)malloc(sizeof(int)*N);
int* H_cx = (int*)malloc(sizeof(int)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_parent_old = (int*)malloc(sizeof(int)*N);
int* H_PQ = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//for cost of endNode
int* H_dest_cost = (int*)malloc(sizeof(int));
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
//init cx (INT_MAX = not yet reached; parent -1 = no predecessor)
for(int i=0;i<N;i++){
H_cx[i]=INT_MAX;
H_parent[i]=-1;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%u",&H_weight[i]);
}
// heuristic values; -1 in the file means "unknown", mapped to 0 (admissible)
FILE* fhx = fopen("Hx.txt","r");
for(int i=0;i<N;i++){
int temp;
fscanf(fhx,"%d",&temp);
if(temp!=-1)
H_hx[i]= temp;
else
H_hx[i] = 0; //to change
}
fclose(fgraph);
fclose(fhx);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
int* H_nV = (int*)malloc(sizeof(int)*N);
//required coz if many tries to add same in diff threads high low lower
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
//insert startNode in PQ[0] with f = g(0) + h(start)
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//create events to record runtime
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//graph struture
int* D_offset;
int* D_edges ;
unsigned int* D_weight;
int* D_hx;
int* D_parent;
//for reading the ancessostor to avoid lock for write after read.
int* D_parent_old;
//Priority queue size
int* D_PQ_size;
//CX
int* D_Cx;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//Diff structure
// NOTE(review): the diff arrays are never allocated here; they are passed to
// A_star_expand with flagDiff=false, so the kernel must not dereference them.
int* D_diff_edges;
int* D_diff_offset;
unsigned int* D_diff_weight;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
//cost of endNode
int* D_dest_cost;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_Cx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_dest_cost,sizeof(int)) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) );
// PQ lives in __device__ global memory; copy via the symbol, not a pointer
gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
if(DEBUG)
printf("[INFO] A* started\n");
cudaEventRecord(start);
//DO A* initailly on whole graph
// Loop invariant per iteration: pop K minima, relax their edges, repair the
// K heaps, then gather and insert the newly touched vertices.
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,D_Cx,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_weight,0);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//gen from flag D_nV
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
// (the destination was reached, but only stop once no queue root is
// cheaper than Cx[endNode] -- checkMIN clears the flag otherwise)
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,D_Cx,endNode,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
getCx<<<1,1>>>(D_Cx,endNode,D_dest_cost);
gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("RUN TIME: %f\n",milliseconds);
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
vector<int> Path;
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
// walk the parent chain back from the destination
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
p = H_parent[p];
}
printf("%d\n",p);
}
else{
printf("not found\n");
}
}
|
/**
* Implementation of a Simple Matrix Multiplication kernel using CUDA
*
* @author: Yvo Elling
* @date: 10-03-23
*/
#include <stdio.h>
#include <array>
#include <chrono>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#define NROF_TEST_RUNS 500
#define MATRIX_WIDTH 8192
#define MATRIX_HEIGHT 8192
#define MATRIX_SIZE MATRIX_WIDTH * MATRIX_HEIGHT
#define BLOCK_DIM 1
#define PLACEHOLDER_NONE 1
typedef uint8_t RowIdx;
typedef uint8_t ColumnIdx;
using std::chrono::high_resolution_clock;
using std::chrono::duration_cast;
using std::chrono::duration;
using std::chrono::milliseconds;
template <class T>
/**
 * Naive matrix-multiply kernel: c[row][col] = sum_i b[row][i] * a[i][col]
 * for square MATRIX_WIDTH x MATRIX_HEIGHT matrices.
 *
 * Expects a 2D launch where (blockIdx.y*blockDim.y+threadIdx.y) spans rows
 * and (blockIdx.x*blockDim.x+threadIdx.x) spans columns; the grid may
 * overshoot the matrix, so out-of-range threads are guarded out.
 */
__global__ void matrixMultiplicationCUDA(T* a, T* b, T* c) {
    // BUG FIX: the original used the RowIdx/ColumnIdx typedefs (uint8_t),
    // which wrap at 255 — far too small for an 8192-wide matrix. Use int.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: grids rarely divide the matrix evenly; the original
    // wrote out of bounds for any thread past the edge.
    if (row < MATRIX_HEIGHT && col < MATRIX_WIDTH) {
        // Accumulate in T, not float: a float accumulator silently loses
        // precision for int32_t element products (the original's `auto
        // columnRowSum = 0.0f` did exactly that).
        T columnRowSum = 0;
        for (int i = 0; i < MATRIX_HEIGHT; ++i) {
            columnRowSum += b[row * MATRIX_WIDTH + i] * a[i * MATRIX_HEIGHT + col];
        }
        c[row * MATRIX_WIDTH + col] = columnRowSum;
    }
}
template <class T>
/**
 * CPU reference for the GPU kernel: computes the full product
 * c[row][col] = sum_k b[row][k] * a[k][col], using the same index
 * convention as matrixMultiplicationCUDA, and reports wall-clock time.
 *
 * BUG FIX: the original wrote each row's single sum to
 * c[i * MATRIX_WIDTH + MATRIX_HEIGHT] — a fixed, wrong column that is
 * out of bounds on the last row (index == MATRIX_SIZE). It also computed
 * only one sum per row instead of one per output element.
 *
 * NOTE(review): at MATRIX_WIDTH == 8192 this is O(n^3) ≈ 5.5e11 multiply-adds
 * and will take a very long time on one core; intended as a correctness
 * reference, not a benchmark.
 */
void matrixMultiplicationCPU(T* a, T* b, T* c) {
    auto t1 = high_resolution_clock::now();
    for (int row = 0; row < MATRIX_HEIGHT; ++row) {
        for (int col = 0; col < MATRIX_WIDTH; ++col) {
            T sum = 0;
            for (int k = 0; k < MATRIX_HEIGHT; ++k) {
                sum += b[row * MATRIX_WIDTH + k] * a[k * MATRIX_HEIGHT + col];
            }
            c[row * MATRIX_WIDTH + col] = sum;
        }
    }
    auto t2 = high_resolution_clock::now();
    int total_execution_time = duration_cast<milliseconds>(t2 - t1).count();
    std::cout << "Total execution time on CPU is: " << total_execution_time << " ms" << std::endl;
}
/**
 * Entry point: allocates zero-filled MATRIX_SIZE operands on host and device,
 * times NROF_TEST_RUNS launches of matrixMultiplicationCUDA with CUDA events,
 * prints the average kernel time, then runs the CPU reference.
 */
int main (int argc, char** argv) {
    std::cout << "Starting CUDA Application" << std::endl;
    std::cout << "Launching CUDA Program for Matrix Multiplication" << std::endl;

    // Host buffers, zero-initialised by calloc.
    int32_t* h_a = (int32_t*)calloc(MATRIX_SIZE, sizeof(int32_t));
    int32_t* h_b = (int32_t*)calloc(MATRIX_SIZE, sizeof(int32_t));
    int32_t* h_c = (int32_t*)calloc(MATRIX_SIZE, sizeof(int32_t));
    if (h_a == NULL || h_b == NULL || h_c == NULL) {
        std::cerr << "Host allocation failed" << std::endl;
        return 1;
    }

    int32_t *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, MATRIX_SIZE * sizeof(int32_t));
    cudaMalloc((void**)&d_b, MATRIX_SIZE * sizeof(int32_t));
    cudaMalloc((void**)&d_c, MATRIX_SIZE * sizeof(int32_t));

    cudaMemcpy(d_a, h_a, MATRIX_SIZE * sizeof(int32_t), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, MATRIX_SIZE * sizeof(int32_t), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, MATRIX_SIZE * sizeof(int32_t), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // BUG FIX: the original launched <<<1 block, dim3(8192, 8192)>>>, which
    // exceeds the 1024-threads-per-block hardware limit — the launch fails
    // and, with no error check, the timings measured nothing. Use a 16x16
    // tile and enough blocks to cover the whole matrix (ceil-div).
    dim3 threadsPerBlock(16, 16, PLACEHOLDER_NONE);
    dim3 blocksPerGrid((MATRIX_WIDTH + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (MATRIX_HEIGHT + threadsPerBlock.y - 1) / threadsPerBlock.y,
                       PLACEHOLDER_NONE);

    std::array<float, NROF_TEST_RUNS> execution_times;
    for (int i = 0; i < NROF_TEST_RUNS; ++i) {
        cudaEventRecord(start);
        matrixMultiplicationCUDA<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c);
        cudaEventRecord(stop);
        // Surface launch-configuration errors (the original never checked).
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
            return 1;
        }
        cudaDeviceSynchronize();
        // BUG FIX: the original passed (d_c, h_c, ..., cudaMemcpyDeviceToHost)
        // — src/dst swapped. Destination comes first: copy device -> host.
        cudaMemcpy(h_c, d_c, MATRIX_SIZE * sizeof(int32_t), cudaMemcpyDeviceToHost);
        cudaEventSynchronize(stop);
        float milliseconds = 0.0f;
        cudaEventElapsedTime(&milliseconds, start, stop);
        execution_times[i] = milliseconds;
    }

    float execution_time_sum = 0.0f;
    for (int i = 0; i < NROF_TEST_RUNS; ++i) {
        execution_time_sum += execution_times[i];
    }
    float avg_execution_time = execution_time_sum / execution_times.size();
    std::cout << "Total average kernel execution time is: " << avg_execution_time << "ms" << std::endl;

    matrixMultiplicationCPU(h_a, h_b, h_c);

    // Release events (the original leaked them) and all buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
#include "includes.h"
/**
 * Element-wise divide: dst[i] = numerator[i] / denominator for i in [0, n),
 * with the convention that a zero denominator yields 0.0f everywhere rather
 * than inf/NaN. Uses a grid-stride loop, so any launch configuration covers
 * all n elements. __fdividef trades a few ulps for speed (fast-math divide).
 */
__global__ void cu_divide(const float* numerator, float* dst, const float denominator, const int n){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    // Hoist the loop-invariant zero test: `denominator` is uniform for the
    // whole launch, so the original's per-element branch was wasted work.
    // Also use a 0.0f literal — the original's 0.0 was a double constant.
    if (denominator == 0.0f) {
        for (; tid < n; tid += stride) {
            dst[tid] = 0.0f;
        }
    } else {
        for (; tid < n; tid += stride) {
            dst[tid] = __fdividef(numerator[tid], denominator);
        }
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.