serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
9,601 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Lattice dimensions and thread_num
#define thread_num 512// Must be 2^k
#define grid_dim_x 128
#define grid_dim_z 16384
#define grid_size 2097152
#define iter 250
#define iterbal 50
void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg, float *M_avg, float *E_var, float *M_var);
__global__ void set_lattice(bool *lattice);
__global__ void iterate_grid(float B, float kT, float Q, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed);
__global__ void reset_vec(float *vec);
__global__ void vec_sum(float *vec, float *result);
__global__ void set_val(float *variable, float value);
__global__ void add_val(float *variable, float *addition);
__device__ int posMod(int number, int modulus);
__device__ int indexMap(int xi, int yi, int zi);
// Inn í Accumulator eru lesin gildi í úrtaki, haldið er utan um meðalgildi og
// dreifni þess úrtaks.
// Accumulator ingests sample values one at a time and maintains the running
// mean and the running sum of squared deviations of the sample, so mean and
// variance are available at any point without storing the values themselves
// (a Welford-style online update).
class Accumulator
{
private:
int count; // number of values ingested so far, count >= 0
float mu; // running mean of the ingested values
float sqDev; // running sum of squared deviations from the mean, sqDev >= 0
public:
// Post: a fresh accumulator over an empty sample
// (count = 0, mu = 0.0, sqDev = 0.0).
Accumulator()
{
count = 0;
mu = 0.0;
sqDev = 0.0;
}
// Post: x has been added to the sample; mean/var/stddev reflect it.
void addDataValue(float x)
{
count++;
// Incremental update: fold x's squared deviation in first, then
// advance the mean (same arithmetic as the textbook online formula).
sqDev = sqDev + 1.0*(count-1)/count*(x-mu)*(x-mu);
mu = mu + (x-mu)/count;
}
// Returns the mean of the sample read so far.
float mean()
{
return mu;
}
// Pre: count > 1.
// Returns the unbiased sample variance.
float var()
{
return sqDev/(count-1);
}
// Pre: count > 1.
// Returns the sample standard deviation.
float stddev()
{
return sqrt(sqDev/(count-1));
}
};
// Entry point: sweeps the external field B, couplings QZ and QY, and the
// temperature kT over their configured ranges; runs one GPU Ising simulation
// per parameter point via calc_cpu() and writes one tab-separated line
// (B, kT, QY, QZ, <E>, <M>, var E, var M) per point to results.dat.
int main(){
// Minimum and maximum values of B, and number of steps.
// If Bsteps = 1, then only Bmin is used.
float B;
float Bmin = 0.0;
float Bmax = 1.0;
int Bsteps = 1;
// Minimum and maximum values of kT, and number of steps.
// If kTsteps = 1, then only kTmin is used.
float kT;
float kTmin = 0.5;
float kTmax = 7.0;
int kTsteps = 1;
// Minimum and maximum values of QY, and number of steps.
// If QYsteps = 1, then only QYmin is used.
float QY;
float QYmin = -1.0;
float QYmax = 1.0;
int QYsteps = 1;
// Minimum and maximum values of QZ, and number of steps.
// If QZsteps = 1, then only QZmin is used.
float QZ;
float QZmin = -1.0;
float QZmax = 1.0;
int QZsteps = 1;
srand(time(NULL)); // Seed CPU RNG (rand() seeds each kernel launch below)
// Per-point simulation outputs, filled by calc_cpu().
float Emean;
float Mmean;
float Evar;
float Mvar;
char filename[20];
sprintf(filename, "results.dat");
FILE *fp;
fp = fopen(filename, "w");
// NOTE(review): fp is not checked for NULL before use.
for (int i=0;i<Bsteps;i++){ // B loop
if (Bsteps>1){
// Linear sweep from Bmin to Bmax inclusive.
B = Bmin + i*(Bmax-Bmin)/(Bsteps-1);
}
else{
B = Bmin;
}
for (int l=0; l<QZsteps; l++){ // QZ loop
if (QZsteps>1){
QZ = QZmin + l*(QZmax-QZmin)/(QZsteps-1);
}
else{
QZ = QZmin;
}
for(int k=0; k<QYsteps; k++){ // QY loop
if (QYsteps>1){
QY = QYmin + k*(QYmax-QYmin)/(QYsteps-1);
}
else{
QY = QYmin;
}
for(int j=0; j<kTsteps; j++){ // kT loop
if (kTsteps>1){
kT = kTmin + j*(kTmax-kTmin)/(kTsteps-1);
}
else{
kT = kTmin;
}
printf("Performing calculation at B=%g, kT=%g, QY=%g, QZ=%g\n", B, kT, QY, QZ);
// QY is passed as calc_cpu's QX coupling parameter.
calc_cpu(B, kT, QY, QZ, &Emean, &Mmean, &Evar, &Mvar);
fprintf(fp, "%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\t%.6e\n", B, kT, QY, QZ, Emean, Mmean, Evar, Mvar);
} // kT loop end
} // QY loop end
} // QZ loop end
} // B loop end
fclose(fp);
}
// U: calc_cpu(B, kT, QX, QZ, &Emean, &Mmean, &Evar, &Mvar)
// B: kT > 0; all four output pointers are valid
// A: One GPU Ising simulation at field B, temperature kT and anisotropic
//    couplings QX, QZ has been run for `iter` checkerboard sweeps. The
//    sample mean and variance of the per-site energy and magnetization
//    (sampled after the first `iterbal` burn-in sweeps) are stored in the
//    four output parameters.
void calc_cpu(float B, float kT, float QX, float QZ, float *E_avg_out, float *M_avg_out, float *E_var_out, float *M_var_out){
// Debug template:
// cudaMemcpy( &buggy, dev_value, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%g\n",buggy);
/*float buggy;*/
/*float buggyvec[thread_num];*/
// Create, allocate memory for and set lattice (all spins up).
bool *dev_lattice;
cudaMalloc( (void**)&dev_lattice, grid_size*sizeof(bool) );
set_lattice<<<1, thread_num>>>(dev_lattice);
// Per-thread energy/magnetization deltas for one sweep.
float *dev_dEvec;
float *dev_dMvec;
cudaMalloc( (void**)&dev_dEvec, thread_num*sizeof(float) );
cudaMalloc( (void**)&dev_dMvec, thread_num*sizeof(float) );
// Running totals; updated on-device by vec_sum each sweep.
float *dev_Etot;
float *dev_Mtot;
/*float *dev_Eavg;*/
/*float *dev_Mavg;*/
cudaMalloc( (void**)&dev_Etot, sizeof(float) );
cudaMalloc( (void**)&dev_Mtot, sizeof(float) );
/*cudaMalloc( (void**)&dev_Eavg, sizeof(float) );*/
/*cudaMalloc( (void**)&dev_Mavg, sizeof(float) );*/
// Initial totals for the all-up lattice: each site contributes
// -1 per axis pair (scaled by the coupling) and -B field energy.
set_val<<<1,1>>>(dev_Etot, grid_size*(-2.0-2.0*QX-2.0*QZ-B));
set_val<<<1,1>>>(dev_Mtot, grid_size);
/*set_val<<<1,1>>>(dev_Eavg, 0.0);*/
/*set_val<<<1,1>>>(dev_Mavg, 0.0);*/
Accumulator energy;
Accumulator magnet;
float Etot;
float Mtot;
for (int j=0; j<iter; j++){
// One full sweep = white then black checkerboard half-sweeps;
// per-thread deltas are zeroed first, then folded into the totals.
reset_vec<<<1, thread_num>>>(dev_dEvec);
reset_vec<<<1, thread_num>>>(dev_dMvec);
iterate_grid<<<1, thread_num>>>(B, kT, QX, QZ, 0, dev_lattice, dev_dEvec, dev_dMvec, rand() );
iterate_grid<<<1, thread_num>>>(B, kT, QX, QZ, 1, dev_lattice, dev_dEvec, dev_dMvec, rand() );
vec_sum<<<1, thread_num>>>(dev_dEvec, dev_Etot);
vec_sum<<<1, thread_num>>>(dev_dMvec, dev_Mtot);
if (j>iterbal){
// Past burn-in: sample the per-site totals. cudaMemcpy here
// implicitly synchronizes with the kernels above.
cudaMemcpy( &Etot, dev_Etot, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( &Mtot, dev_Mtot, sizeof(float), cudaMemcpyDeviceToHost);
Etot = Etot/grid_size;
Mtot = Mtot/grid_size;
energy.addDataValue(Etot);
magnet.addDataValue(Mtot);
}
}
*E_avg_out = energy.mean();
*M_avg_out = magnet.mean();
*E_var_out = energy.var();
*M_var_out = magnet.var();
cudaFree(dev_lattice);
cudaFree(dev_dEvec);
cudaFree(dev_dMvec);
cudaFree(dev_Etot);
cudaFree(dev_Mtot);
}
// U: set_lattice<<<1, thread_num>>>(dev_lattice);
// B: dev_lattice points to allocated device memory for grid_size bools
// A: every lattice site is set to spin-up (true)
__global__ void set_lattice(bool *lattice){
// Each of the thread_num threads initialises an interleaved slice.
for (int idx = threadIdx.x; idx < grid_size; idx += thread_num){
lattice[idx] = true;
}
}
// U: iterate_grid<<<1, thread_num>>>(B, kT, QX, QZ, round, lattice, dE, dM, seed)
// B: dev_lattice holds grid_size spins; d_E_vec/d_M_vec hold thread_num floats
// A: One Metropolis half-sweep has been performed over one checkerboard
//    sublattice (round=0: "white" sites, round=1: "black" sites). The energy
//    and magnetization change produced by each thread has been added to
//    d_E_vec[tid] and d_M_vec[tid].
__global__ void iterate_grid(float B, float kT, float QX, float QZ, bool round, bool *dev_lattice, float *d_E_vec, float *d_M_vec, int seed){
int tid=threadIdx.x;
// Per-thread RNG; NOTE(review): curand_init on every launch is relatively
// expensive — persisting states across launches would be cheaper.
curandState_t state;
curand_init(seed+tid, 0, 0, &state);
int si;      // current spin as +/-1
float ssum;  // weighted sum of neighbour spins (each mapped to +/-1)
float delta_E;
float delta_M;
float p;     // Metropolis acceptance probability
float r;     // uniform random draw
int xi;
int yi;
int zi;
// Stride 2*thread_num over linear indices of parity `round`; the xi/yi
// folding below mirrors odd rows/planes (boustrophedon order) so that
// even/odd linear indices form the two checkerboard sublattices —
// NOTE(review): assumes the same layout that indexMap() produces.
for (int i=round+2*tid;i<grid_size;i+=2*thread_num){
zi = i/grid_dim_z;
if ((zi%2)==0){
yi = (i%grid_dim_z)/grid_dim_x;
}
else{
yi = grid_dim_x-(i%grid_dim_z)/grid_dim_x-1;
}
if ((yi+zi)%2 == 0){
xi = i%grid_dim_x;
}
else{
xi = grid_dim_x - i%grid_dim_x - 1;
}
// Map stored bool (0/1) to spin (-1/+1).
si = 2*dev_lattice[i]-1;
// Neighbour field: x-axis neighbours weight 1, y-axis weight QX,
// z-axis weight QZ; each pair of terms 2s-1 is written as 2s...-2.
ssum = 2*dev_lattice[indexMap(xi-1,yi,zi)]
+2*dev_lattice[indexMap(xi+1,yi,zi)]
-2
+QX*2*dev_lattice[indexMap(xi,yi-1,zi)]
+QX*2*dev_lattice[indexMap(xi,yi+1,zi)]
-QX*2
+QZ*2*dev_lattice[indexMap(xi,yi,zi-1)]
+QZ*2*dev_lattice[indexMap(xi,yi,zi+1)]
-QZ*2;
delta_E = 2*si*(ssum+B);
delta_M = -2*si;
// Metropolis criterion: always accept energy-lowering flips,
// otherwise accept with probability exp(-dE/kT).
if (delta_E < 0){
p = 1;
}
else{
// NOTE(review): exp() promotes to double here; expf() would
// keep the computation in single precision.
p = exp(-delta_E/kT);
}
r = curand_uniform(&state);
if (r<p){ // Spin flip!
d_E_vec[tid] += delta_E;
d_M_vec[tid] += delta_M;
dev_lattice[i] = !( dev_lattice[i] );
}
}
}
// U: reset_vec<<<1, thread_num>>>(dev_vec)
// B: dev_vec has been allocated device memory for thread_num floats
// A: all elements of dev_vec have been set to 0.0 (one element per thread)
__global__ void reset_vec(float *vec){
vec[threadIdx.x] = 0.0;
}
// U: vec_sum<<<1, thread_num>>>(dev_vec, dev_result)
// B: dev_vec has length thread_num (a power of two); launched with exactly
//    one block of thread_num threads
// A: the sum of the elements of dev_vec has been ADDED to *result;
//    dev_vec's contents are destroyed (used as reduction scratch)
__global__ void vec_sum(float *vec, float *result){
// Tree reduction: each pass folds the upper half onto the lower half.
// Writers (tid < offset) never touch the indices being read (>= offset)
// within a pass, and __syncthreads() orders consecutive passes.
int tid = threadIdx.x;
int offset = thread_num>>1;
while (offset>0){
if (tid < offset){
vec[tid] += vec[tid+offset];
}
__syncthreads();
offset=offset>>1;
}
if (tid==0){
*result += vec[0];
}
// Single-thread reference version kept for debugging:
/*int tid = threadIdx.x;*/
/*if (tid == 0){*/
/*for (int i=1;i<thread_num;i++){*/
/*vec[0] += vec[i];*/
/*}*/
/**result += vec[0];*/
/*}*/
}
// U: set_val<<<1, 1>>>(variable, value)
// B: variable points to a device float
// A: *variable = value (device-side scalar assignment without a memcpy)
__global__ void set_val(float *variable, float value){
*variable = value;
}
// U: add_val<<<1, 1>>>(variable, addition)
// B: both pointers refer to device floats
// A: *variable += *addition
__global__ void add_val(float *variable, float *addition){
*variable += *addition;
}
// U: z = posMod(n, m)
// B: m > 0
// A: z is the mathematical (non-negative) remainder of n mod m, i.e.
//    z = n%m for n >= 0 and z = n%m + m for n < 0.
__device__ int posMod(int number, int modulus){
// C's % takes the sign of the dividend, so lift negative remainders by m.
const int r = number % modulus;
return (r < 0) ? r + modulus : r;
}
// U: i = indexMap(xi, yi, zi)
// A: returns the linear lattice index for (periodically wrapped) coordinates
//    xi, yi, zi, using the same boustrophedon (serpentine) layout that
//    iterate_grid uses to recover coordinates from a linear index.
// NOTE(review): all three axes are wrapped with grid_dim_x as the modulus.
// This only works because the lattice is cubic (grid_size = grid_dim_x^3,
// grid_dim_z = grid_dim_x^2); it would silently break for non-cubic sizes.
__device__ int indexMap(int xi, int yi, int zi){
xi = posMod(xi,grid_dim_x);
yi = posMod(yi,grid_dim_x);
zi = posMod(zi,grid_dim_x);
int i = zi*grid_dim_z;
// Odd z-planes traverse y in reverse.
if (zi%2==0){
i += yi*grid_dim_x;
}
else{
i += grid_dim_z-yi*grid_dim_x-grid_dim_x;
}
// Rows of odd (yi+zi) parity traverse x in reverse.
if ((yi+zi)%2 == 0){
i += xi;
}
else{
i += grid_dim_x-xi-1;
}
return i;
}
|
9,602 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
// const defines
#define NBIN 1000000000
#define NUM_BLOCK 4
#define NUM_THREAD 16
// struct to get time
struct timeval current_time = {0,0};
int tid;
float pi = 0, time_elapsed;
uint begin, end;
// function to calculate pi
// Kernel: numerical integration of 4/(1+x^2) over [0,1), which equals pi.
// Each thread owns sum[idx] and accumulates its own partial sum over the
// bins it is assigned (interleaved, stride = nthreads*nblocks). The host
// reduces the partials afterwards.
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// BUG FIX: sum comes from cudaMallocManaged, which does not zero memory;
// the kernel previously accumulated into whatever garbage was there.
// Each thread writes only its own slot, so this is race-free.
sum[idx] = 0.0;
for (int i = idx; i < nbin; i += nthreads*nblocks) {
double x = i*step; // left edge of bin i
sum[idx] += 4.0/(1.0+(x*x));
}
}
// Entry point: estimates pi by GPU numerical integration, times the kernel,
// prints the result and appends an experiment record to the log file.
int main()
{
// per-thread partial sums, in managed (host+device visible) memory
double *sumDev;
double step = 1.0/NBIN; // dx
// BUG FIX: the buffer holds doubles; sizeof(float) under-allocated it by
// half, so the host-side reduction read past the allocation.
size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double);
// initializing file in append mode to insert experiment data
FILE *f = fopen("/home/aac-pc/Daniel/experimental-log.txt", "a");
// alloc space for the accumulator array
cudaMallocManaged(&sumDev, size);
// BUG FIX: zero the partials on the host (managed memory is host-writable;
// cudaMallocManaged does not initialise it).
for (int z = 0; z < NUM_BLOCK*NUM_THREAD; z++) {
sumDev[z] = 0.0;
}
// get initial time to evaluate performance
// BUG FIX: "&current_time" had been mangled into the mojibake "¤t_time".
gettimeofday(&current_time, NULL);
begin = current_time.tv_sec*1000000 + current_time.tv_usec;
// call function to calculate pi in threads
cal_pi<<<NUM_BLOCK, NUM_THREAD>>>(sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK);
// synchronize threads
cudaDeviceSynchronize();
// get final time to evaluate performance
gettimeofday(&current_time, NULL);
end = current_time.tv_sec*1000000 + current_time.tv_usec;
time_elapsed = end - begin;
// reduce the per-thread partials on the host
for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++){
pi += sumDev[tid];
}
pi *= step;
// print final value in console and save data info in log file
printf("PI = %f\n",pi);
// BUG FIX: guard against a failed fopen (fprintf/fclose on NULL is UB).
if (f != NULL) {
fprintf(f, "%d;%d;%f;%f\n", NUM_THREAD, NUM_BLOCK, pi, (time_elapsed/1000000));
fclose(f);
}
// free cuda var
cudaFree(sumDev);
return 0;
}
9,603 | /*
Parallel and Distributed Systems
\file v3.c
\brief Implementation for the Ising Model in CUDA
Multiple thread sharing common input moments
\authors Ioannis Gonidelis Dimitra Karatza
\AEMs 8794 8828
\date 2020-01-15
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//Should be BLOCK_DIMENSION x GRID_DIMENSION = N
#define BLOCK_DIMENSION 11
#define BLOCK_CACHE BLOCK_DIMENSION
#define GRID_DIMENSION 47
#define N 517
//Careful on usage (use parenthesis when NEEDED)
//memory access periodic boundary conditions
#define gx(x) (x+n)%n
#define gy(y) (y+n)%n
#define sx(x) (x+BLOCK_CACHE+4)%(BLOCK_CACHE+4)
#define sy(y) (y+BLOCK_CACHE+4)%(BLOCK_CACHE+4)
// Compares the computed lattice G against the reference lattice `expected`
// (both n*n ints) and prints a coloured CORRECT/WRONG verdict for the given
// iteration count k.
void validation(int n,int k,int *expected,int *G){
int mismatch = 0;
for(int idx = 0; idx < n*n; idx++){
if(expected[idx] != G[idx]){
mismatch = 1;
break;
}
}
if(!mismatch){
// green for success
printf("\033[0;32m");
printf("k=%d: CORRECT ISING MODEL",k);
printf("\033[0m \n");
}else{
printf("k=%d: WRONG ISING MODEL\n",k);
}
}
// Kernel: one Ising iteration. Each block stages its tile of G plus a 2-cell
// halo into shared memory, then each thread recomputes its moment from the
// weighted 5x5 neighbourhood (weights w, centre weight ignored via c==d==2).
// NOTE(review): with N=517 and GRID_DIMENSION*BLOCK_DIMENSION=517, fit==1 and
// the outer iteration loop runs exactly once; for fit>1 the shared tile is
// reloaded without a __syncthreads() between iterations — verify before
// changing the launch geometry.
__global__ void calc_moment(int *G, int* newG, double* w, int n){
//NOTE: gridDim.x = gridDim.y it's the same
__shared__ int sharedG[(BLOCK_CACHE+4)][(BLOCK_CACHE+4)]; //2D predefined shared memory
int fit = n/(gridDim.x*BLOCK_DIMENSION); //number of complete blocks that fit into G
//Global G indices
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x,y; //shared memory indices
int s_x,s_y; //neighbor shared indices
int thread_step_x= blockDim.x*gridDim.x;
double infl; //influence of neighbors on current moment
for(int iteration=0; iteration<(fit)*(fit); iteration++){
infl=0;
if(ix<N && iy<N){
//x,y=threadIdx.x,threadIdx.y
x=ix%BLOCK_CACHE;
y=iy%BLOCK_CACHE;
//Each thread loads shelf (one) moment
sharedG[x][y]=G[n*iy+ix];
// Edge/corner threads additionally load the 2-cell halo; the sx/sy
// macros wrap halo indices into the shared tile.
//upper edge
if(threadIdx.y==0){
//upper left corner
if(threadIdx.x==0){
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
if(!(i==0 && j==0)){
sharedG[sx(x-i)][sy(y-j)]=G[n*(gy(iy-j))+gx(ix-i)];
}
}
}
}
//upper right corner
else if(threadIdx.x==BLOCK_CACHE-1){
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
if(!(i==0 && j==0)){
sharedG[sx(x+i)][sy(y-j)]=G[n*(gy(iy-j))+gx(ix+i)];
}
}
}
}
//upper edge non-corner threads
else{
sharedG[x][sy(y-1)]=G[n*(gy(iy-1))+ix];
sharedG[x][sy(y-2)]=G[n*(gy(iy-2))+ix];
}
}
//bottom edge
if(threadIdx.y==BLOCK_CACHE-1){
//bottom right corner
if(threadIdx.x==BLOCK_CACHE-1){
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
if(!(i==0 && j==0)){
sharedG[sx(x+i)][sy(y+j)]=G[n*(gy(iy+j))+gx(ix+i)];
}
}
}
}
else if(threadIdx.x==0){
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
if(!(i==0 && j==0)){
sharedG[sx(x-i)][sy(y+j)]=G[n*(gy(iy+j))+gx(ix-i)];
}
}
}
}
else{
//non-corner threads
sharedG[x][sy(y+1)]=G[n*(gy(iy+1))+ix];
sharedG[x][sy(y+2)]=G[n*(gy(iy+2))+ix];
}
}
//right edged non-corner threads
if(threadIdx.x==BLOCK_CACHE-1 && threadIdx.y%(BLOCK_CACHE-1)!=0){
sharedG[sx(x+1)][y]=G[n*iy+gx(ix+1)];
sharedG[sx(x+2)][y]=G[n*iy+gx(ix+2)];
}
//left edged non-corner threads
if(threadIdx.x==0 && threadIdx.y%(BLOCK_CACHE-1)!=0){
sharedG[sx(x-1)][y]=G[n*iy+gx(ix-1)];
sharedG[sx(x-2)][y]=G[n*iy+gx(ix-2)];
}
// NOTE(review): __syncthreads() is inside `if(ix<N && iy<N)`; all
// threads satisfy it for the 517/47/11 geometry, but this would be a
// divergent barrier for geometries where some threads fail the guard.
__syncthreads();
//for all the neighbors
for(int c=0;c<5;c++){
for(int d=0;d<5;d++){
//Do not update if the next neighbor coincides with the current point
if((c!=2) || (d!=2)){
//Windows centered on the edge lattice points wrap around to the other side
s_y = sy((c-2)+y);
s_x = sx((d-2)+x);
//Influence of a neighbor is increased
//Add to infl the weight*value of the previous neighbor
infl += sharedG[s_x][s_y] * w[c*5+d];
}
}
}
//Next value of a moment is defined according to the value of infl
if(infl>0.0001){
newG[iy*n+ix]=1;
}else if(infl<-0.0001){
newG[iy*n+ix]=-1;
}else{
newG[iy*n+ix]=G[iy*n+ix];
}
}
//update G coordinates - traverse horizontally though G map
if((ix+thread_step_x)/n>=1){
iy=blockDim.y*gridDim.y+iy;
}else{
iy=iy;
}
ix= (ix+thread_step_x)%n;
}
}
// Runs k Ising iterations on the n*n lattice G (in place), using the 5x5
// weight window w. On return the caller's G array holds the final state.
void ising( int *G, double *w, int k, int n){
int *newG,*swapG;
// scratch lattice for the next state (ping-pong buffer)
cudaMallocManaged(&newG,n*n*sizeof(int));
dim3 block(BLOCK_DIMENSION, BLOCK_DIMENSION);
int grid_dimension = GRID_DIMENSION; //define it globally or find a way to produce it
dim3 grid(grid_dimension, grid_dimension);
//for every iteration (k)
for(int t=0;t<k;t++){
//Call kernel function
calc_moment<<<grid,block>>>(G, newG, w,n);
// Synchronize threads before swapping the arrays
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
//Swap arrays G and newG: the freshly written lattice becomes G
swapG=newG;
newG=G;
G=swapG;
}
if(k % 2 == 1){
// After an odd number of swaps the result lives in the scratch buffer
// (now local G) while newG aliases the caller's array: copy back.
memcpy(newG, G, n*n*sizeof(int));
cudaFree(G); // BUG FIX: release the scratch buffer (was leaked)
} else {
cudaFree(newG); // BUG FIX: after an even k, newG is the scratch buffer
}
}
// Entry point: loads the initial lattice, runs the Ising model for k=1, 4
// and 11 iterations and validates each result against reference data files.
int main(){
//k = number of iterations
int k = 1;
int n=N;
// 5x5 neighbour-weight window, copied into managed memory for the kernel.
double *weights;
cudaMallocManaged(&weights,5*5*sizeof(double));
double w[25] = {0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004};
memcpy(weights,w,sizeof(w));
// Get the moments of array G from the binary file
FILE *fptr = fopen("conf-init.bin","rb");
if (fptr == NULL){
printf("Error: Cannnot open file");
exit(1);
}
int *G;
cudaMallocManaged(&G,n*n*sizeof(int));
fread(G, sizeof(int), n*n, fptr);
fclose(fptr);
//Save a copy of G to call again function ising() for different k
//because ising() is changing the array G
int *copyG;
// BUG FIX: "&copyG" had been mangled into the mojibake "©G".
cudaMallocManaged(&copyG,n*n*sizeof(int));
memcpy(copyG, G, n*n*sizeof(int));
//Call ising for k=1
ising(G, weights, k, n);
// Check results by comparing with ready data for k=1
int *expected;
cudaMallocManaged(&expected,n*n*sizeof(int));
fptr = fopen("conf-1.bin","rb");
if (fptr == NULL){
printf("Error: Cannnot open file");
exit(1);
}
fread(expected, sizeof(int), n*n, fptr);
fclose(fptr);
validation(n,k,expected,G);
//Call ising for k=4
k=4;
memcpy(G, copyG, n*n*sizeof(int));
ising(G, weights, k, n);
// Check for k = 4
fptr = fopen("conf-4.bin","rb");
if (fptr == NULL){
printf("Error: Cannnot open file");
exit(1);
}
fread(expected, sizeof(int), n*n, fptr);
fclose(fptr);
validation(n,k,expected,G);
//Call ising for k=11;
k=11;
memcpy(G, copyG, n*n*sizeof(int));
ising(G, weights, k, n);
// Check for k = 11
fptr = fopen("conf-11.bin","rb");
if (fptr == NULL){
printf("Error: Cannnot open file");
exit(1);
}
fread(expected, sizeof(int), n*n, fptr);
fclose(fptr);
validation(n,k,expected,G);
// BUG FIX: release the managed allocations (previously leaked).
cudaFree(weights);
cudaFree(G);
cudaFree(copyG);
cudaFree(expected);
return 0;
}
|
// Fixed-width unsigned integer aliases used by the copy/unroll macros below.
typedef unsigned long long int uint64;
typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
// 32-bit word, viewable as 4 bytes.
typedef union
{
uint8 u8[4];
uint32 u32[1];
} t32;
// 64-bit block, viewable as bytes or 32-bit words.
typedef union
{
uint8 u8[8];
uint32 u32[2];
uint64 u64[1];
} t64;
// 128-bit block; ulonglong2 is CUDA's built-in 2x64-bit vector type.
typedef union
{
uint8 u8[16];
uint16 u16[8];
uint32 u32[4];
ulonglong2 u128;
} t128;
// 256-bit block (ulonglong4 = CUDA 4x64-bit vector type).
typedef union
{
uint8 u8[32];
uint16 u16[16];
uint32 u32[8];
uint64 u64[4];
ulonglong2 u128[2];
ulonglong4 u256;
} t256;
// 512-bit block.
typedef union
{
uint16 u16[32];
uint32 u32[16];
uint64 u64[8];
ulonglong2 u128[4];
ulonglong4 u256[2];
} t512;
/* Statement-like bulk-copy macros.
   BUG FIX: the stray trailing '\' after each '}while(0)' has been removed —
   a backslash-newline there splices the NEXT source line (the following
   #define) into the macro's replacement list, which breaks preprocessing. */
/* copy: a[0..7] = b[0..7] */
#define copy(a, b) do {\
a[0] = b[0];\
a[1] = b[1];\
a[2] = b[2];\
a[3] = b[3];\
a[4] = b[4];\
a[5] = b[5];\
a[6] = b[6];\
a[7] = b[7];\
}while(0)
/* copy32x8: a[0..31] = b[0..31] */
#define copy32x8(a, b) do {\
a[0] = b[0];\
a[1] = b[1];\
a[2] = b[2];\
a[3] = b[3];\
a[4] = b[4];\
a[5] = b[5];\
a[6] = b[6];\
a[7] = b[7];\
a[8] = b[8];\
a[9] = b[9];\
a[10] = b[10];\
a[11] = b[11];\
a[12] = b[12];\
a[13] = b[13];\
a[14] = b[14];\
a[15] = b[15];\
a[16] = b[16];\
a[17] = b[17];\
a[18] = b[18];\
a[19] = b[19];\
a[20] = b[20];\
a[21] = b[21];\
a[22] = b[22];\
a[23] = b[23];\
a[24] = b[24];\
a[25] = b[25];\
a[26] = b[26];\
a[27] = b[27];\
a[28] = b[28];\
a[29] = b[29];\
a[30] = b[30];\
a[31] = b[31];\
}while(0)
/* copy64: a[0..3] = b[0..3] */
#define copy64(a, b) do {\
a[0] = b[0];\
a[1] = b[1];\
a[2] = b[2];\
a[3] = b[3];\
}while(0)
/* copy64_2: a[0..3] = b[4..7] (upper half of b) */
#define copy64_2(a, b) do {\
a[0] = b[4];\
a[1] = b[5];\
a[2] = b[6];\
a[3] = b[7];\
}while(0)
/* loop unrolling: expand a(i) for the listed indices, in the listed order */
#define unroll_1_0(a) do { a(1) a(0) } while (0)
#define unroll_3_0(a) do { a(3) a(2) a(1) a(0) } while (0)
#define unroll_4(a) do { a(0) a(1) a(2) a(3) } while (0)
#define unroll_8(a) do { a(0) a(1) a(2) a(3) a(4) a(5) a(6) a(7) } while (0)
#define unroll_1_7(a) do { a(1) a(2) a(3) a(4) a(5) a(6) a(7) } while (0)
#define unroll_7(a) do { a(0) a(1) a(2) a(3) a(4) a(5) a(6) } while (0)
#define unroll_7_0(a) do { a(7) a(6) a(5) a(4) a(3) a(2) a(1) a(0) } while (0)
9,605 |
// Kernel: per-block min-reduction over 2*blockDim.x floats, with clock()
// timestamps recorded at block start and end (the classic CUDA "clock"
// sample). Launch with dynamic shared memory of 2*blockDim.x*sizeof(float);
// `timer` must hold 2*gridDim.x entries: start times in [0, gridDim.x),
// stop times in [gridDim.x, 2*gridDim.x).
// NOTE(review): every block reads the same input range [0, 2*blockDim.x) —
// blocks repeat the work rather than partition the input (intended for
// timing measurements).
extern "C" __global__ void
timedReduction( float const *input, float *output, clock_t *timer){
extern __shared__ float shared[];
int const tid = threadIdx.x;
int const bid = blockIdx.x;
if(tid == 0)
timer[bid] = clock();
// Stage two input elements per thread into shared memory.
shared[tid] = input[tid];
shared[tid+blockDim.x] = input[tid+blockDim.x];
// Tree reduction: each pass keeps the smaller of shared[tid] and
// shared[tid+d]; the barrier at the top of the loop also orders the
// initial stores above against the first pass.
for(int d = blockDim.x; d>0; d /= 2){
__syncthreads();
if(tid<d){
float f0 = shared[tid];
float f1 = shared[tid+d];
if(f1 < f0){
shared[tid] = f1;
}
}
}
if(tid == 0)
output[bid] = shared[0];
__syncthreads();
if(tid == 0)
timer[bid+gridDim.x] = clock();
}
|
9,606 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/generate.h>
#include <thrust/transform.h>
#include <math.h>
#include <stdio.h>
#define N (1 << 20)
using namespace thrust::placeholders;
// Monte-Carlo estimate of pi: draw N points in the unit square, count the
// fraction inside the unit quarter-circle, multiply by 4.
int main(void) {
thrust::host_vector<float> hvec_x(N);
thrust::host_vector<float> hvec_y(N);
// Fill with raw rand() integers (stored as float); scaled to [0,1] below.
thrust::generate(hvec_x.begin(), hvec_x.end(), rand);
thrust::generate(hvec_y.begin(), hvec_y.end(), rand);
thrust::device_vector<float> dvec_x = hvec_x;
thrust::device_vector<float> dvec_y = hvec_y;
// Normalise to [0,1] on the device (placeholder expression _1 / RAND_MAX).
thrust::transform(dvec_x.begin(), dvec_x.end(), dvec_x.begin(), _1 / RAND_MAX);
thrust::transform(dvec_y.begin(), dvec_y.end(), dvec_y.begin(), _1 / RAND_MAX);
// 1.0 where x^2 + y^2 < 1 (inside the quarter-circle), else 0.0.
thrust::device_vector<float> dvec_inCircle(N);
thrust::transform(dvec_x.begin(), dvec_x.end(), dvec_y.begin(), dvec_inCircle.begin(), (_1*_1 + _2*_2) < 1);
// pi ~= 4 * (points inside) / (total points).
float pi = thrust::reduce(dvec_inCircle.begin(), dvec_inCircle.end())*4.f/N;
printf("pi = %f\n", pi);
return 0;
}
|
9,607 | #include "includes.h"
// Kernel: writes `value` into vector[id] for every global thread id in the
// INCLUSIVE range [id_min, id_max]. Assumes 1-D blocks laid out over a 2-D
// grid: id = blockDim.x * (blockIdx.y*gridDim.x + blockIdx.x) + threadIdx.x.
// (The misspelling "Vaule" is kept — external callers link against it.)
__global__ void SetVauleInIdxMinMax( float* vector, int id_min, int id_max, float value)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
if (id >= id_min && id <= id_max)
vector[id] = value;
}
9,608 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <time.h>
#include <math.h>
#define SIZE 1000
#define BLKS 4
#define THREADSPBLKS 256
#define TILE_WIDTH 8
// Kernel: one Jacobi-style heat-diffusion update on the compressed
// (border-region-only) lattice layout built by heatCalc(). g_d holds the
// current state, h_d receives the next state; boundary cells are copied
// through unchanged.
// NOTE(review): `id` is never bounds-checked against new_len, so any launch
// geometry where grid*block > new_len writes out of bounds.
// NOTE(review): the final g_d[id] = h_d[id] is ordered only within a block
// (__syncthreads()); other blocks may still be reading g_d in the same
// launch — a cross-block data race. The region arithmetic below is tightly
// coupled to the host packing scheme; verify both sides together.
__global__
void heatCalcKernel(float * g_d,float * h_d, int width, int itr, int new_len, int new_width)
{
int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Flatten the 2-D launch coordinates, then re-derive row/col in the
// ORIGINAL width-wide layout.
int id = (row * new_width) + col;
row = (id / width);
col = id % width;
int left = id - 1;
int right = id + 1;
int top = ((row - 1) * width) + col;
int bottom = ((row + 1) * width + col);
int tmp = 2;
if( id < (width * (itr + 1))){
// Top band of the compressed layout (full-width rows).
if(((id % width) == 0) || ((id % width) == (width - 1)) || (id < width) || (id >= (width * (width - 1)))){
h_d[id] = g_d[id];
}else{
h_d[id] = 0.25 * (g_d[top] + g_d[left] + g_d[bottom] + g_d[right]);
}
}else if ( id < (new_len - (width * (itr + 1)))){
// Middle band (narrow left/right strips).
if(((id % width) == 0) || ((id % (2 * itr)) == 0) || ((id % (tmp * itr)) == ((tmp * itr) - 1))){
h_d[id] = g_d[id];
}else{
h_d[id] = 0.25 * (g_d[id -1] + g_d[id + 1] + g_d[id - itr - tmp] + g_d[id + itr + tmp]);
}
}else{
// Bottom band of the compressed layout.
if((((id - (new_len - (width * (itr + tmp)))) % width) == 0) || (((id - (new_len - (width * (itr + 1)))) % width) == (width - 1)) || ((id - (new_len - (width * (itr + 1)))) >= (width * (itr - 1)))){
h_d[id] = g_d[id];
} else{
h_d[id] = 0.25 * (g_d[id -1] + g_d[id + 1] + g_d[id - width] + g_d[id + width]);
}
}
__syncthreads();
g_d[id] = h_d[id];
__syncthreads();
}
// Host driver: builds a compressed heat lattice (only the border region that
// can change in `itr` iterations), runs `itr` kernel sweeps, copies the
// result back and reports wall-clock time.
// NOTE(review): with width = 1000000, `len = width * width` is 10^12 and
// overflows 32-bit int — undefined behaviour; len/remove/new_len and the
// loop index need a 64-bit type (size_t / long long).
// NOTE(review): `count` is incremented on EVERY j (up to len), even when no
// value is stored, so inhost1[count] indexes far past its new_len
// allocation — presumably count++ was meant to be inside the branches that
// actually store a value. Confirm intent before fixing.
void heatCalc()
{
clock_t tic;
clock_t toc;
tic = clock();
int width = 1000000;
int len = width * width;
int itr = 50;
// Interior cells that cannot change within itr iterations are dropped.
int remove = (width - itr - 1) * (width - itr - 1);
int new_len = len - remove;
float *inhost = (float*)malloc(len*sizeof(float));
if(inhost == NULL){
printf("Out of memory\n");
exit(-1);
}
float *inhost1 = (float*)malloc(new_len*sizeof(float));
if(inhost1 == NULL){
printf("Out of memory\n");
exit(-1);
}
float *outhost = (float*)malloc(new_len*sizeof(float));
if(outhost == NULL){
printf("Out of memory\n");
exit(-1);
}
int j;
float * g_d;
float * h_d;
int counter = 0;
/*----------------------------------------------------------------*/
// Report the device in use.
cudaError_t error;
cudaDeviceProp dev;
error = cudaGetDeviceProperties(&dev, 0);
if(error != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(error));
exit(-1);
}
printf("\nDevice %d:\n", 0);
printf("name: %s\n",dev.name);
cudaSetDevice(0);
/*--------------------------------------------------------------*/
// Initialise: hot strip (150) at cells 10..30, boundary at 80, interior 0;
// the compressed copy keeps only boundary-adjacent interior cells.
int count = 0;
for( j = 0; j < len; j++){
if((j >= 10) && (j <= 30)){
inhost[j] = 150;
inhost1[count] = inhost[j];
}else if((j < width) || ((j % width) == 0) || ((j % width) == (width - 1)) || (j >= (width * (width - 1)))){
inhost[j] = 80;
inhost1[count] = inhost[j];
}else{
inhost[j] = 0;
if (((j % width) < (itr + 1)) || ((j % width) > (width -(itr + 1)))){
inhost1[count] = inhost[j];
}
}
count++;
}
free(inhost);
printf("---------\n");
cudaMalloc((void**)&g_d, new_len*sizeof(float));
//intialize the matrix
cudaMemcpy(g_d,inhost1,new_len*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_d, new_len*sizeof(float));
// Square-ish launch geometry covering new_len cells.
int new_width = ceil(sqrt(new_len));
int grid = ceil(new_width / TILE_WIDTH);
dim3 dimGrid(grid,grid);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
// kernel invocation
for(counter = 0; counter < itr; counter++){
heatCalcKernel<<<dimGrid,dimBlock>>>(g_d,h_d,width,itr,new_len,new_width);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
//transfer C_d from device to host
cudaMemcpy(outhost, h_d, (new_len*sizeof(float)), cudaMemcpyDeviceToHost);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaFree(g_d);
cudaFree(h_d);
for( j = 0; j < new_len; j++){
inhost1[j] = outhost[j];
}
toc = clock();
double time_taken_parallel = (double)(toc -tic)/CLOCKS_PER_SEC; // in seconds
printf("time taken: %f\n", time_taken_parallel);
free(inhost1);
free(outhost);
}
// Entry point: runs the heat-diffusion simulation once.
int main()
{
heatCalc();
return 0;
}
|
9,609 | //IN THE NAME OF ALLAH
#include <stdio.h>
#include <stdlib.h>
void merge(int *, int , int , int);
// Sorts arr[left..right] (inclusive, 0-based indices) in ascending order.
// BUG FIX: the parameters were declared in the order (right, left), so the
// top-level call merge_sort(arr, 0, size-1) evaluated (size-1) < 0 and
// returned without sorting anything. The recursive calls already passed
// arguments in (left, right) order, so only the declaration was wrong.
void merge_sort(int * arr, int left, int right){
    if (left < right){
        // Midpoint written to avoid (left + right) overflow.
        int middle = left + (right - left) / 2;
        merge_sort(arr, left, middle);
        merge_sort(arr, middle + 1, right);
        merge(arr, left, middle, right);
    }
}
// Merges the two adjacent sorted runs arr[left..middle] and
// arr[middle+1..right] back into arr[left..right].
void merge(int * arr, int left, int middle, int right){
    int i, j, k;
    int n1 = middle - left + 1; // length of the left run
    int n2 = right - middle;    // length of the right run
    int left_arr[n1];
    int right_arr[n2];
    for (i = 0; i < n1; i++){
        left_arr[i] = arr[left + i];
    }
    for (j = 0; j < n2; j++){
        right_arr[j] = arr[j + middle + 1];
    }
    i = 0;
    j = 0;
    // BUG FIX: the output cursor must start at `left`, not 0 — writing
    // from index 0 clobbered the front of the array for right-hand runs.
    k = left;
    while (i < n1 && j < n2) {
        if (left_arr[i] <= right_arr[j]) { // <= keeps the sort stable
            arr[k] = left_arr[i];
            i++;
        }
        else{
            arr[k] = right_arr[j];
            j++;
        }
        k++;
    }
    // Drain whichever run still has elements.
    while (i < n1) {
        arr[k] = left_arr[i];
        i++;
        k++;
    }
    while (j < n2) {
        arr[k] = right_arr[j];
        j++;
        k++;
    }
}
// Prints the `size` elements of A, each followed by a space, then a newline.
void print_array(int * A, int size) {
    int idx;
    for (idx = 0; idx < size; idx++) {
        printf("%d ", A[idx]);
    }
    printf("\n");
}
// Entry point: reads an element count and that many integers from stdin,
// prints the array, merge-sorts it and prints it again.
int main()
{
    int size = 0;
    // BUG FIX: validate the input — the original used an unchecked scanf
    // result and passed a possibly non-positive size to malloc.
    if (scanf("%d", &size) != 1 || size <= 0){
        printf("Invalid size\n");
        return 1;
    }
    int i = 0;
    int * arr = (int *) malloc(sizeof(int) * size);
    if (arr == NULL){
        printf("Out of memory\n");
        return 1;
    }
    for (i = 0; i < size; i++){
        scanf("%d", &arr[i]);
    }
    printf("Given array is: \n");
    print_array(arr, size);
    merge_sort(arr, 0, size - 1);
    printf("\nSorted array is: \n");
    print_array(arr, size);
    free(arr); // BUG FIX: the original leaked this allocation
    return 0;
}
9,610 | #include <iostream>
#define N_MEMELEM 32
// Kernel: mem[i] = i for this thread's global index (one element per thread).
__global__ void init_devmem(double *mem)
{
mem[blockIdx.x*blockDim.x + threadIdx.x] = blockIdx.x*blockDim.x + threadIdx.x;
}
// Kernel: doubles this thread's element of mem in place.
__global__ void double_devmem(double *mem)
{
mem[blockIdx.x*blockDim.x + threadIdx.x] *= 2;
}
// Host wrapper: initialises all N_MEMELEM elements of dev_mem to their index
// (32 threads per block; N_MEMELEM must be a multiple of 32).
void do_cuda_init(double *dev_mem)
{
init_devmem<<<N_MEMELEM/32,32>>>(dev_mem);
}
// Host wrapper: doubles all N_MEMELEM elements of dev_mem in place.
void do_cuda_double(double *dev_mem)
{
double_devmem<<<N_MEMELEM/32,32>>>(dev_mem);
}
// Exports a device allocation as an IPC handle that another process can open;
// logs the CUDA status to stdout.
cudaIpcMemHandle_t get_memhandle(void *devmem)
{
cudaError_t err_dev;
cudaIpcMemHandle_t dev_mem_handle;
err_dev = cudaIpcGetMemHandle(&dev_mem_handle, devmem);
std::cout << "getHandle: " << cudaGetErrorName(err_dev) << ": " << cudaGetErrorString(err_dev) << std::endl;
return dev_mem_handle;
}
// Maps an IPC handle exported by another process into this process's address
// space; returns the device pointer (NULL-initialised if the open fails).
void *cuda_open_handle(cudaIpcMemHandle_t dev_mem_handle)
{
cudaError_t err_dev;
void *mem_ptr=NULL;
err_dev = cudaIpcOpenMemHandle(&mem_ptr, dev_mem_handle, cudaIpcMemLazyEnablePeerAccess);
std::cout << "openHandle: " << cudaGetErrorName(err_dev) << ": " << cudaGetErrorString(err_dev) << std::endl;
return mem_ptr;
}
// Unmaps a device pointer previously obtained via cuda_open_handle.
void cuda_close_handle(void *devmem)
{
cudaError_t err_dev;
err_dev = cudaIpcCloseMemHandle(devmem);
std::cout << "closeHandle: " << cudaGetErrorName(err_dev) << ": " << cudaGetErrorString(err_dev) << std::endl;
}
|
9,611 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
// Kernel: element-wise c[index] = a[index] + b[index].
// NOTE(review): the bit-shift index packing implies blockDim = (512,512,512)
// (2^18 threads per x step, 2^9 per y step), i.e. 2^27 threads per block —
// far beyond the 1024-threads-per-block hardware limit, so no launch
// configuration can make this indexing dense. Verify the intended geometry.
__global__ void addition(int *c, int *a, int *b)
{
int index = (blockIdx.x<<27)+(threadIdx.x<<18)+(threadIdx.y<<9)+threadIdx.z;
c[index] = a[index] + b[index];
}
// Entry point: reads an array size, fills two host arrays, adds them on the
// GPU and reports the elapsed wall-clock time.
// NOTE(review): the launch uses dim3(512,512,512) threads per block, which
// exceeds the 1024-threads-per-block limit — the launch fails with
// cudaErrorInvalidConfiguration. No error is checked (cudaGetLastError is
// never called), so the program silently copies back the unmodified zeros.
// NOTE(review): the timing window also includes allocation and host init.
int main() {
int arraySize;
scanf("%d", &arraySize);
clock_t start = clock();
int *a;
int *b;
int *c;
c = (int *)malloc(arraySize*sizeof(int));
a = (int *)malloc(arraySize*sizeof(int));
b = (int *)malloc(arraySize*sizeof(int));
for (int i = 0; i < arraySize; i++) {
c[i] = 0;
a[i] = 99999;
b[i] = 99999;
}
int *d_a=0;
int *d_b=0;
int *d_c=0;
cudaMalloc((void **)&d_a, arraySize*sizeof(int));
cudaMalloc((void **)&d_b, arraySize*sizeof(int));
cudaMalloc((void **)&d_c, arraySize*sizeof(int));
cudaMemcpy(d_a, a, arraySize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, arraySize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, arraySize*sizeof(int), cudaMemcpyHostToDevice);
//int blocks = (arraySize >> 27);
addition <<<dim3(10,1,1), dim3(512,512,512)>>>(d_c, d_b, d_a);
// The blocking memcpy below synchronizes with the (attempted) kernel.
cudaMemcpy(c, d_c, arraySize*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
clock_t end = clock();
float seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("time cost: %f\n", seconds);
/*
for (int i = 0; i < arraySize;i++) {
printf("%d ", c[i]);
}*/
return 0;
}
|
9,612 | // * -PSM2D-
// * P and SV WAVES
// ************************************************************************
// * Calculating P and SV wavefields in homogeneous half-space for a *
// * point source by the Finite-Difference Method. *
// * **********************************************************************
// * Last modified: May 14, 2017 *
// * Author: Yanbin WANG *
// * Department of Earth and Planetary Sciences *
// * Faculty of Sciences, Kyushu University *
// * Hakozaki 6-10-1, Fukuoka, 812-8581, Japan *
// * Now at: Department of Geophysics, Peking University *
// * 100871, Beijing, China *
// * Modified to staggered-grid scheme on 16 June 2005. *
// * Modified to PSM/FDM hybrid method in February 2006 *
// * by Xing Wei and Yanbin Wang. *
// * Modified for Lanzhou basin on 11 January 2011. *
// * by Yanbin Wang. *
// * Modified to Finite-Difference Method on March, 2016 *
// * by Xueyan Li and Yanbin Wang. *
// * Modified to Cuda C on March, 2017 *
// * by Congyue Cui and Yanbin Wang. *
// ************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// map block and thread index to i and j
// devij(dimx, dimy): declares i (slow index), j (fast index) and the flat
// offset ij for the calling thread; assumes the launch geometry used by
// mat::set (dimx*nbt blocks of dimy/nbt threads each).
#define devij(dimx, dimy) \
int i = blockIdx.x % dimx; \
int j = threadIdx.x + (blockIdx.x - i) / dimx * dimy / d_nbt; \
int ij = i * dimy + j
// parameter
// Grid dimensions, spatial steps and time step of the simulation.
const int nx = 2048, ny = 1024, nx2 = nx * 2, ny2 = ny * 2;
const float dx = 0.0342, dy = 0.0342, dt = 1.0e-3;
// Total time steps and snapshot/output cadence.
const int ntmax = 30000, nwrite = 500;
// Source-time half-width and onset time of the Herrmann wavelet.
const float at = 0.1 / 4.0, t0 = at * 2;
const int na = 0;
// Receiver line: nst stations, decimated from nx columns.
const int nst = 512, nsskip = nx / nst;
// Absorbing boundary widths (x and y).
const int nxa = 20, nya = 20;
const int nskip = 10, ntskp = ntmax / nskip + 1;
// Thread-blocking factor; must match d_nbt below.
const int nbt = 8;
// plotting parameter
const int nsnap=60;
const float pamp=0.5,samp=2.2;
const float pampall=0.5,sampall=2.2;
// device parameter
// Source grid position and spatial smoothing half-widths.
__constant__ int is0 = 292, js0 = 146;
__constant__ float ax = 0.0342, ay = 0.0342;
// Point-force, dipole and moment-tensor source components.
__constant__ float fxx = 0.0, fyy = 0.0, fzz = 0.0;
__constant__ float dpxx = 0.0, dpyy = 0.0, dpzz = 0.0;
__constant__ float rmxx = 1.0, rmxy = 0.0, rmyy = -1.0, rmyx=0.0;
// 4th-order staggered-grid FD coefficients.
__constant__ float c0 = 9.0 / 8.0, c1 = 1.0 / 24.0;
// Device-side copy of nbt (used by the devij macro).
__constant__ int d_nbt = 8;
// matrix related function
// matrix related function
// Small helpers for allocating, initialising and (de)serialising
// device/host matrices stored as flat row-major float arrays.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked anywhere
// in this namespace.
namespace mat{
float *create(const int m) {
// create floating-point device array of m elements
float *a;
cudaMalloc((void**)&a, m * sizeof(float));
return a;
}
float *create_h(const int m) {
// create floating-point host array of m elements
return (float *)malloc(m * sizeof(float));
}
int *create_i(const int m){
// create integer device array of m elements
int *a;
cudaMalloc((void**)&a, m * sizeof(int));
return a;
}
// Kernel: sets every element of the m*n device matrix a to `init`.
__global__ void set_d(float *a, const float init, const int m, const int n){
devij(m, n);
a[ij] = init;
}
void set(float *a, const float init, const int m, const int n){
// initialize the value of a device matrix
// (launch geometry must match the devij macro: m*nbt blocks, n/nbt threads)
mat::set_d<<<m * nbt, n / nbt>>>(a, init, m, n);
}
void copyhd(float *d_a, const float *a, const int m){
// copy memory from host(a) to device(d_a)
cudaMemcpy(d_a, a , m * sizeof(float), cudaMemcpyHostToDevice);
}
void copydh(float *a, const float *d_a, const int m){
// copy memory from device(d_a) to host(a)
cudaMemcpy(a, d_a , m * sizeof(float), cudaMemcpyDeviceToHost);
}
void write(FILE *file, float *d_a, float *a, const int nx, const int ny){
// write matrix data to file, one value per line, row-major
mat::copydh(a, d_a, nx * ny);
for(int i= 0; i < nx; i++){
for(int j = 0; j < ny; j++){
fprintf(file,"%f\n", a[i * ny + j]);
}
}
}
void read(FILE *file, float *a, const int nx, const int ny){
// read matrix data from file into host array a (row-major)
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
fscanf(file, "%f", a + ij);
}
}
}
}
// forward related function
namespace psv{
__device__ float dherrman(float a, float x, float x0){
    // Piecewise-linear derivative of the Herrmann bell herrman() below:
    // nonzero only on (x0 - 2a, x0 + 2a], evaluated at offset u = x - x0.
    float half_w = 2.0 * a;            // half-width of the support
    float u = x - x0;
    float s = (u + half_w) / a;        // normalized position in [0, 4]
    float norm = half_w * a;
    if(u > -half_w && u <= -a) return s / norm;
    if(u > -a && u <= a) return (-s + 2.0) / norm;
    if(u > a && u <= half_w) return (s - 4.0) / norm;
    return 0.0;                        // outside the support
}
__device__ float herrman(float a, float x, float x0){
    // Herrmann pseudo-bell: a C1 piecewise-quadratic bump of half-width 2a
    // centered at x0, used as the smooth source wavelet in space and time.
    float w = 2.0*a;                   // support is (x0 - w, x0 + w]
    float u = x - x0;
    float s = (u + w)/a;               // normalized position in [0, 4]
    if(u <= -w || u > w) return 0.0;   // outside the support
    if(u <= -a) return (0.5 * s * s) / w;
    if(u <= a) return (-0.5 * s * s + 2.0 * s - 1.0) / w;
    return (0.5 * s * s - 4.0 * s + 8.0) / w;
}
// --- Spatially/temporally smoothed source terms --------------------------
// Each helper returns the product of a bell (herrman) or its derivative
// (dherrman) in x, in z, and a bell in time, sampled at grid node
// ((i+1)*dx, (j+1)*dz) for a source at (i0*dx+xs, j0*dz+zs).
// The moment-tensor variants (f?m??) take the derivative along the axis
// the tensor component acts on; xs/zs shift onto the staggered sub-grid.

// fx contribution from the Mxz moment component: d/dz of the z-bell.
__device__ float fxmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// fz contribution from the Mxz moment component: d/dx of the x-bell.
__device__ float fzmxz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// fz contribution from the Mzz moment component: d/dz of the z-bell.
__device__ float fzmzz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// fx contribution from the Mxx moment component: d/dx of the x-bell.
__device__ float fxmxx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at,float xs,float zs){
float x0 = i0*dx+xs;
float z0 = j0*dz+zs;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// Plain x-directed body force: bell in x, z and t (no derivative, no shift).
__device__ float fx(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// Plain z-directed body force.
// NOTE(review): body is identical to fx(); both sample the same unstaggered
// node — confirm the z force was not meant to use a half-cell shift.
__device__ float fz(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// Explosive-source x component: d/dx of the x-bell (dipole in x).
__device__ float exforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = -psv::dherrman(ax,x,x0);
float fhz = psv::herrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
// Explosive-source z component: d/dz of the z-bell (dipole in z).
__device__ float ezforce(int i, int j, int i0, int j0, float dx, float dz, float ax, float az, float t, float t0, float at){
float x0 = i0*dx;
float z0 = j0*dz;
float x = (i+1)*dx;
float z = (j+1)*dz;
float fhx = psv::herrman(ax,x,x0);
float fhz = -psv::dherrman(az,z,z0);
float fht = psv::herrman(at,t,t0);
return fhx*fhz*fht;
}
__global__ void istxy(int *istx, int *isty, const int na){
    // Receiver layout: launched with one block per station. Station n sits
    // at x-index 4n+1 and a fixed depth index of na+1.
    int n = blockIdx.x;
    istx[n] = 4 * n + 1;
    isty[n] = na + 1;
}
// Build the material model on the doubled (staggered) nx2 x ny2 grid:
// rig = shear modulus (mu), den = density, lam = Lambda's first parameter.
// nd / q1d are per-column basement/sediment interface depths in meters
// (negative down, hence the sign flip and /1000 to km). Velocities are in
// km/s, densities in g/cm^3 — a 5-layer 1-D model hung off the two surfaces.
__global__ void rdl(float *rig, float *den, float *lam,
const float *nd, const float *q1d,
const int nx2, const int ny2, const float dy){
devij(nx2, ny2);
// j counts half-cells, so physical depth is j*dy/2
float depth = j * dy / 2.0;
float vpb, vsb, rob;
float rrigb, rlanb, rdenb;
if(depth <= -q1d[i] / 1000.0){
// shallow sediments (above the q1d interface)
vpb = 1.70;
vsb = 0.85;
rob = 1.8;
}
else if(depth <= -nd[i] / 1000.0){
// deeper basin fill (between q1d and nd interfaces)
vpb = 4.0;
vsb = 2.1;
rob = 2.4;
}
else if(depth <= 15.0){
// upper crust
vpb = 5.8;
vsb = 3.3;
rob = 2.7;
}
else if(depth <= 32.0){
// lower crust
vpb = 6.4;
vsb = 3.6;
rob = 2.85;
}
else{
// upper mantle
vpb = 6.9;
vsb = 3.9;
rob = 3.1;
}
// convert (vp, vs, rho) to Lame parameters
rrigb = rob * vsb * vsb;
rlanb = rob * vpb * vpb - 2.0 * rrigb;
rdenb = rob;
if(j < na * 2){
// air layer above the free surface: zero moduli, keep density
rig[ij] = 0.0;
den[ij] = rdenb;
lam[ij] = 0.0;
}
else{
rig[ij] = rrigb;
den[ij] = rdenb;
lam[ij] = rlanb;
}
}
__global__ void gg(float *ggg, const float apara,
const int nx, const int ny, const int nxa, const int nya){
    // Cerjan-style absorbing-boundary damping profile: a Gaussian taper
    // exp(-(apara*d)^2) over the nxa-cell side strips and the nya-cell
    // bottom strip (d = distance into the strip); 1.0 in the interior.
    // FIX: the original called double-precision exp()/pow() in a float
    // kernel; use expf() and an explicit square to stay in single precision.
    devij(nx, ny);
    float d = 0.0f;                       // interior -> expf(0) == 1.0f
    if(i + 1 < nxa){
        d = apara * (nxa - i - 1);        // left absorbing strip
    }
    else if(i + 1 > (nx - nxa + 1)){
        d = apara * (i - nx + nxa);       // right absorbing strip
    }
    else if(j + 1 > (ny - nya + 1)){
        d = apara * (j - ny + nya);       // bottom absorbing strip
    }
    ggg[ij] = expf(-d * d);
}
// --- 4th-order staggered-grid finite differences -------------------------
// finid{x,y}{x,y}: first letter = derivative direction, second = which
// half-cell the result lands on (backward- vs forward-shifted stencil).
// c0 = 9/8 and c1 = 1/24 are the standard 4th-order staggered coefficients;
// one-sided/truncated stencils handle the first and last two rows/columns.
// NOTE(review): the boundary branches implicitly treat values outside the
// grid as zero — consistent with the absorbing taper, but verify at the
// free surface.

// d/dy, backward-staggered output (uses ai[j] - ai[j-1] in the interior).
__global__ void finidyy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * ai[0] - c1 * ai[1]);
}
else if(j == 1){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j] - ai[j - 1]) - c1 * (ai[j + 1] - ai[j - 2]));
}
}
// d/dy, forward-staggered output (uses ai[j+1] - ai[j] in the interior).
__global__ void finidyx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *ai = a + i * ny;
if(j == 0){
dya[ij] = 1.0 / dy * (c0 * (ai[1] - ai[0]) - c1 * ai[2]);
}
else if(j == ny - 2){
dya[ij] = 1.0 / dy * (c0 * (ai[ny - 1] - ai[ny - 2]) + c1 * ai[ny - 3]);
}
else if(j == ny - 1){
dya[ij] = 1.0 / dy * (c0 * (-ai[ny - 1]) + c1 * ai[ny - 2]);
}
else{
dya[ij] = 1.0 / dy * (c0 * (ai[j + 1] - ai[j]) - c1 * (ai[j + 2] - ai[j - 1]));
}
}
// d/dx, backward-staggered output (row stride ny between x-neighbors).
__global__ void finidxy(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * aj[0] - c1 * aj[ny]);
}
else if(i == 1){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[i * ny] - aj[(i - 1) * ny]) - c1 * (aj[(i + 1) * ny] - aj[(i - 2) * ny]));
}
}
// d/dx, forward-staggered output.
__global__ void finidxx(float *a, float *dya,
const int nx, const int ny, const float dx, const float dy, const float dt){
devij(nx, ny);
float *aj = a + j;
if(i == 0){
dya[ij] = 1.0 / dx * (c0 * (aj[ny] - aj[0]) - c1 * aj[2 * ny]);
}
else if(i == nx - 2){
dya[ij] = 1.0 / dx * (c0 * (aj[(nx - 1) * ny] - aj[(nx - 2) * ny]) + c1 * aj[(nx - 3) * ny]);
}
else if(i == nx - 1){
dya[ij] = 1.0 / dx * (c0 * (-aj[(nx - 1) * ny ]) + c1 * aj[(nx - 2) * ny]);
}
else{
dya[ij] = 1.0 / dx * (c0 * (aj[(i + 1) * ny] - aj[i * ny]) - c1 * (aj[(i + 2) * ny] - aj[(i - 1) * ny]));
}
}
// Stress update: sxx/syy/sxy advance one time step from the velocity
// derivatives, with the absorbing taper ggg applied multiplicatively.
// Material parameters are sampled from the doubled staggered grid.
// NOTE(review): lam/rig are allocated nx2*ny2 but indexed here with row
// stride ny (not ny2) — the addresses stay in bounds, but confirm this is
// the intended staggered-sub-grid packing and not a stride bug.
__global__ void sxy(float *sxx, float *syy, float *sxy,
const float *lam, const float *rig, const float *ggg,
const float *dxvx, const float *dxvy, const float *dyvx, const float *dyvy,
const int nx, const int ny, const float dt){
devij(nx, ny);
float ram1 = lam[(i * 2 + 1) * ny + j * 2];
float rig1 = rig[(i * 2 + 1) * ny + j * 2];
float rig2 = rig[i * 2 * ny + j * 2 + 1];
float gg = ggg[ij];
// Hooke's law for the P-SV system (2-D isotropic elasticity)
float sxxt1ij = (ram1 + 2.0 * rig1) * dxvx[ij] + ram1 * dyvy[ij];
float syyt1ij = (ram1 + 2.0 * rig1) * dyvy[ij] + ram1 * dxvx[ij];
float sxyt1ij = rig2 * (dxvy[ij] + dyvx[ij]);
sxx[ij] = sxx[ij] * gg + dt * sxxt1ij;
syy[ij] = syy[ij] * gg + dt * syyt1ij;
sxy[ij] = sxy[ij] * gg + dt * sxyt1ij;
// free-surface condition: normal stress vanishes on row na
if(j == na) syy[i * ny + na] = 0.0;
}
// Velocity/displacement update: v += dt * (div(stress) + source) / rho,
// u += dt * v, both damped by the absorbing taper ggg. The source terms
// (moment tensor rm??, body forces f??, dipoles dp??) are active only
// while t < ftmax; vy's source is shifted half a cell (-dx/2, -dy/2) onto
// the staggered sub-grid.
// NOTE(review): den is allocated nx2*ny2 but indexed with row stride ny
// here (same pattern as sxy) — verify the intended packing.
__global__ void vxyuxy(float *vx, float *vy, float *ux, float *uy,
const float *dxsxx, const float *dxsxy, const float *dysxy, const float *dysyy,
const float *ggg, const float *den, const float t, const float ftmax,
const int nx, const int ny, const float dx, const float dy, const float dt,
const float t0, const float at){
devij(nx, ny);
float gg = ggg[ij];
// densities at the two staggered velocity nodes
float denvx = den[i * 2 * ny + j * 2];
float denvy = den[(i * 2 + 1) * ny + j * 2 + 1];
float fx1,fy1;
if(t < ftmax){
fx1 = rmxx * psv::fxmxx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
rmxy * psv::fxmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, 0.0, 0.0) +
fxx * psv::fx(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpxx * psv::exforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
fy1 = rmyx * psv::fzmxz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
rmyy * psv::fzmzz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at, -dx/2, -dy/2) +
fzz * psv::fz(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at) +
dpzz * psv::ezforce(i, j ,is0, js0, dx, dy, ax, ay, t, t0, at);
}
else{
// source has finished radiating
fx1 = 0.0;
fy1 = 0.0;
}
// acceleration from the stress divergence plus source, Newton's 2nd law
float uxt2ij = (dxsxx[ij] + dysxy[ij] + fx1) / denvx;
float uyt2ij = (dxsxy[ij] + dysyy[ij] + fy1) / denvy;
vx[ij] = vx[ij] * gg + dt * uxt2ij;
vy[ij] = vy[ij] * gg + dt * uyt2ij;
ux[ij] = ux[ij] * gg + dt * vx[ij];
uy[ij] = uy[ij] * gg + dt * vy[ij];
}
// Record the displacement at every receiver for time sample it1.
// Launched as <<<nst, 2>>>: one block per station, and the two threads of
// the block split the work — threadIdx.x == 1 (nonzero) stores the
// horizontal component, threadIdx.x == 0 the vertical one.
__global__ void uxyall(float *uxall, float *uyall, const float *ux, const float *uy,
const int *istx, const int *isty, const int it1, const int ntskp, const int ny){
int ns = blockIdx.x;
// istx/isty hold 1-based indices (see istxy); convert to 0-based
int isx = istx[ns]-1;
int isy = isty[ns]-1;
if(threadIdx.x){
uxall[ns * ntskp + it1] = ux[isx * ny + isy];
}
else{
uyall[ns * ntskp + it1] = uy[isx * ny + isy];
}
}
__global__ void ups(float *up, float *us, const float *dxux, const float *dyuy,
const float *dxuy, const float *dyux, const int nx, const int ny){
    // Split the displacement field for plotting: up = divergence (P energy),
    // us = curl (S energy), computed from the precomputed derivatives.
    devij(nx, ny);
    float divergence = dxux[ij] + dyuy[ij];
    float curl = dxuy[ij] - dyux[ij];
    up[ij] = divergence;
    us[ij] = curl;
}
void query(){
    // Print the capabilities of every CUDA device in the system.
    // FIX: memory sizes are size_t; the original cast them to unsigned int
    // and printed with %u, truncating on devices with >= 4 GiB. Use %zu.
    // Also renamed the inner loop variables, which shadowed the device index.
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    for (int dev = 0; dev < devCount; ++dev){
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, dev);
        printf("Major revision number: %d\n", devProp.major);
        printf("Minor revision number: %d\n", devProp.minor);
        printf("Name: %s\n", devProp.name);
        printf("Total global memory: %zu\n", devProp.totalGlobalMem);
        printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
        printf("Total registers per block: %d\n", devProp.regsPerBlock);
        printf("Warp size: %d\n", devProp.warpSize);
        printf("Maximum memory pitch: %zu\n", devProp.memPitch);
        printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of block: %d\n", d, devProp.maxThreadsDim[d]);
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of grid: %d\n", d, devProp.maxGridSize[d]);
        printf("Clock rate: %d\n", devProp.clockRate);
        printf("Total constant memory: %zu\n", devProp.totalConstMem);
        printf("Texture alignment: %zu\n", devProp.textureAlignment);
        printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
        printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
        printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
void forward(const char *oname, const char *wname, const int output){
    // 2-D P-SV elastic forward modeling by 4th-order staggered-grid FD.
    // oname: snapshot file, wname: waveform file; output != 0 additionally
    // dumps P/S snapshots every nwrite steps.
    // FIXES vs. original: the second waveform write staged uyall through
    // h_uxall (h_uyall was allocated but unused); the cleanup freed uy
    // twice while leaking vy; uxall/uyall device buffers were never freed.
    // --- device field and work arrays ---
    float *sxx = mat::create(nx * ny), *sxy = mat::create(nx * ny), *syy = mat::create(nx * ny);
    float *den = mat::create(nx2 * ny2), *rig = mat::create(nx2 * ny2), *lam = mat::create(nx2 * ny2);
    float *ux = mat::create(nx * ny), *uy = mat::create(nx * ny);
    float *vx = mat::create(nx * ny), *vy = mat::create(nx * ny);
    float *up = mat::create(nx * ny), *us = mat::create(nx * ny);
    float *dxux = mat::create(nx * ny), *dxuy = mat::create(nx * ny);
    float *dyux = mat::create(nx * ny), *dyuy = mat::create(nx * ny);
    float *dxvx = mat::create(nx * ny), *dxvy = mat::create(nx * ny);
    float *dyvx = mat::create(nx * ny), *dyvy = mat::create(nx * ny);
    float *dxsxx = mat::create(nx * ny), *dxsxy = mat::create(nx * ny);
    float *dysxy = mat::create(nx * ny), *dysyy = mat::create(nx * ny);
    float *ggg = mat::create(nx * ny);
    float *uxall = mat::create(nst * ntskp), *uyall = mat::create(nst * ntskp);
    float *nd = mat::create(nx2), *q1d = mat::create(nx2);
    float *h_up = mat::create_h(nx * ny), *h_us = mat::create_h(nx * ny);
    float *h_uxall = mat::create_h(nst * ntskp), *h_uyall = mat::create_h(nst * ntskp);
    int *istx = mat::create_i(nst), *isty = mat::create_i(nst);
    // output files
    FILE *wfile = fopen(wname, "w");
    FILE *ofile = fopen(oname, "w");
    // observation points (one block per station)
    psv::istxy<<<nst, 1>>>(istx, isty, na);
    // velocity structure: interface depths read per column from disk
    FILE *n4096 = fopen("N4096.dat", "r");
    FILE *q14096 = fopen("Q14096.dat", "r");
    float *h_nd = mat::create_h(nx2);
    float *h_q1d = mat::create_h(nx2);
    for(int i = 0; i < nx2; i++){
        fscanf(n4096, "%f", &h_nd[i]);
        fscanf(q14096, "%f", &h_q1d[i]);
    }
    fclose(n4096);
    fclose(q14096);
    mat::copyhd(nd, h_nd, nx2);
    mat::copyhd(q1d, h_q1d, nx2);
    free(h_nd);
    free(h_q1d);
    psv::rdl<<<nx2 * nbt, ny2 / nbt>>>(rig, den, lam, nd, q1d, nx2, ny2, dy);
    // initialize fields; the source radiates until ftmax
    float ftmax = t0 + at * 2;
    mat::set(vx, 0.0, nx, ny);
    mat::set(vy, 0.0, nx, ny);
    mat::set(ux, 0.0, nx, ny);
    mat::set(uy, 0.0, nx, ny);
    mat::set(sxx, 0.0, nx, ny);
    mat::set(sxy, 0.0, nx, ny);
    mat::set(syy, 0.0, nx, ny);
    // absorbing boundary condition (Gaussian damping taper)
    float apara = 0.015;
    psv::gg<<<nx * nbt, ny / nbt>>>(ggg, apara, nx, ny, nxa, nya);
    // time stepping
    int ntw = 0;
    int ntt = 0;
    clock_t timestart = clock();
    for(int it = 0; it < ntmax; it++){
        if(it % 500 == 0){
            printf("timestep: %d / %d\n", it, ntmax);
        }
        ntt++;
        ntw++;
        float t = dt * it;
        // velocity derivatives -> stress update
        psv::finidxx<<<nx * nbt, ny / nbt>>>(vx, dxvx, nx, ny, dx, dy, dt);
        psv::finidxy<<<nx * nbt, ny / nbt>>>(vy, dxvy, nx, ny, dx, dy, dt);
        psv::finidyx<<<nx * nbt, ny / nbt>>>(vx, dyvx, nx, ny, dx, dy, dt);
        psv::finidyy<<<nx * nbt, ny / nbt>>>(vy, dyvy, nx, ny, dx, dy, dt);
        psv::sxy<<<nx * nbt, ny / nbt>>>(sxx, syy, sxy, lam, rig, ggg, dxvx, dxvy, dyvx, dyvy, nx, ny, dt);
        // stress derivatives -> velocity/displacement update
        psv::finidxy<<<nx * nbt, ny / nbt>>>(sxx, dxsxx, nx, ny, dx, dy, dt);
        psv::finidxx<<<nx * nbt, ny / nbt>>>(sxy, dxsxy, nx, ny, dx, dy, dt);
        psv::finidyy<<<nx * nbt, ny / nbt>>>(sxy, dysxy, nx, ny, dx, dy, dt);
        psv::finidyx<<<nx * nbt, ny / nbt>>>(syy, dysyy, nx, ny, dx, dy, dt);
        psv::vxyuxy<<<nx * nbt, ny / nbt>>>(vx, vy, ux, uy, dxsxx, dxsxy, dysxy, dysyy, ggg, den, t, ftmax, nx, ny, dx, dy, dt, t0, at);
        if(ntt == nskip){
            // sample the waveform at every receiver
            ntt = 0;
            psv::uxyall<<<nst, 2>>>(uxall, uyall, ux, uy, istx, isty, (it + 1) / nskip, ntskp, ny);
        }
        if(output && ntw == nwrite){
            // dump a P/S snapshot (divergence and curl of displacement)
            ntw = 0;
            psv::finidxx<<<nx * nbt, ny / nbt>>>(ux, dxux, nx, ny, dx, dy, dt);
            psv::finidxy<<<nx * nbt, ny / nbt>>>(uy, dxuy, nx, ny, dx, dy, dt);
            psv::finidyx<<<nx * nbt, ny / nbt>>>(ux, dyux, nx, ny, dx, dy, dt);
            psv::finidyy<<<nx * nbt, ny / nbt>>>(uy, dyuy, nx, ny, dx, dy, dt);
            psv::ups<<<nx * nbt, ny / nbt>>>(up, us, dxux, dyuy, dxuy, dyux, nx, ny);
            mat::write(ofile, up, h_up, nx, ny);
            mat::write(ofile, us, h_us, nx, ny);
        }
    }
    {
        // timing and device-memory report
        printf("\ntotal time: %.2fs\n", (float)(clock() - timestart) / CLOCKS_PER_SEC);
        size_t free_byte;
        size_t total_byte;
        cudaMemGetInfo(&free_byte, &total_byte);
        float free_db = (float)free_byte;
        float total_db = (float)total_byte;
        float used_db = total_db - free_db;
        printf("memory usage: %.1fMB / %.1fMB\n", used_db / 1024.0 / 1024.0, total_db / 1024.0 / 1024.0);
    }
    // write waveforms (BUG FIX: uy trace now staged through h_uyall)
    mat::write(wfile, uxall, h_uxall, nst, ntskp);
    mat::write(wfile, uyall, h_uyall, nst, ntskp);
    fclose(ofile);
    fclose(wfile);
    // release device memory (BUG FIX: free vy instead of freeing uy twice;
    // also free uxall/uyall which the original leaked)
    cudaFree(sxx); cudaFree(sxy); cudaFree(syy);
    cudaFree(den); cudaFree(rig); cudaFree(lam);
    cudaFree(ux); cudaFree(uy);
    cudaFree(vx); cudaFree(vy);
    cudaFree(up); cudaFree(us);
    cudaFree(dxux); cudaFree(dxuy);
    cudaFree(dyux); cudaFree(dyuy);
    cudaFree(dxvx); cudaFree(dxvy);
    cudaFree(dyvx); cudaFree(dyvy);
    cudaFree(dxsxx); cudaFree(dxsxy);
    cudaFree(dysxy); cudaFree(dysyy);
    cudaFree(ggg);
    cudaFree(nd); cudaFree(q1d);
    cudaFree(istx); cudaFree(isty);
    cudaFree(uxall); cudaFree(uyall);
    free(h_up); free(h_us);
    free(h_uxall); free(h_uyall);
}
void waveform(const char *wname){
    // Convert the waveform dump into GMT-readable "ux"/"uz" files:
    // one ">"-separated trace per plotted station, amplitude scaled by 15
    // and offset vertically by the station position.
    // FIX: the original never closed filex/filez nor freed ux/uz.
    int ndskip = 1;                       // time decimation (1 = keep all)
    float dt2 = dt * 10, dx2 = dx * 4;    // output sample interval / spacing
    float *ux = mat::create_h(nst * ntskp), *uz = mat::create_h(nst * ntskp);
    FILE *file = fopen(wname, "r");
    FILE *filex = fopen("ux", "w");
    FILE *filez = fopen("uz", "w");
    mat::read(file, ux, nst, ntskp);
    mat::read(file, uz, nst, ntskp);
    fclose(file);
    for(int i = 0; i < nst; i += nsskip){
        fprintf(filex, ">\n");
        fprintf(filez, ">\n");
        for(int j = 0; j < ntskp; j += ndskip){
            int ij = i * ntskp + j;
            float tm = j * dt2;
            float shift = i * dx2;
            fprintf(filex, "%f %f\n", tm, ux[ij] * 15.0 + shift);
            fprintf(filez, "%f %f\n", tm, uz[ij] * 15.0 + shift);
        }
    }
    fclose(filex);
    fclose(filez);
    free(ux);
    free(uz);
}
// Convert the snapshot dump (pairs of P/S fields per step) into five GMT
// files snap1..snap5, clipping amplitudes to pamp/samp and suppressing
// values below 10% of the frame maximum.
void snapshot(const char *oname){
FILE *file=fopen(oname,"r");
// host buffers: raw P/S fields plus scaled/combined plotting arrays
float *up = mat::create_h(nx * ny), *us = mat::create_h(nx * ny);
float *u = mat::create_h(nx * ny), *p = mat::create_h(nx * ny), *s = mat::create_h(nx * ny);
// indices of the frames that actually get written (only these 5 of nsnap)
int n[5]={0,1,2,3,4};
FILE **snapshot = (FILE **)malloc(5*sizeof(FILE *));
*snapshot = fopen("snap1", "w");
*(snapshot + 1) = fopen("snap2", "w");
*(snapshot + 2) = fopen("snap3", "w");
*(snapshot + 3) = fopen("snap4", "w");
*(snapshot + 4) = fopen("snap5", "w");
float pmax, smax, cp, lp ,cs, ls, x, y;
for(int isnap = 0; isnap < nsnap; isnap++){
// reset the combined plot field for this frame
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
u[i * ny + j] = 0;
}
}
// every frame is read so the file position advances, even unwritten ones
mat::read(file, up, nx, ny);
mat::read(file, us, nx, ny);
// frame-wide amplitude maxima for the 10% suppression threshold
pmax=0.0;
smax=0.0;
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
if(pmax < abs(up[ij])){
pmax = abs(up[ij]);
}
if(smax < abs(us[ij])){
smax = abs(us[ij]);
}
}
}
// printf("Pmax=%f Smax=%f\n",pmax,smax);
// clip the P field to +/- pamp and suppress weak samples
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cp=pamp;
lp=0.1*pmax;
if(abs(up[ij]) > cp && up[ij] < 0.0){
up[ij] = -cp;
}
else if(abs(up[ij]) > cp && up[ij] > 0.0){
up[ij] = cp;
}
// NOTE(review): this zeroes up[] based on the S amplitude and a
// P-derived threshold (lp = 0.1*pmax) — looks like it was meant
// to be abs(up[ij]) < lp; confirm against the original FDM code.
if(abs(us[ij]) < lp){
up[ij] = 0.0;
}
}
}
// clip the S field to +/- samp and suppress weak samples
for(int i = 0; i < nx; i++){
for(int j = 0; j < ny; j++){
int ij = i * ny + j;
cs = samp;
ls = 0.1 * smax;
if(abs(us[ij]) > cs && us[ij] < 0.0){
us[ij] = -cs;
}
else if(abs(us[ij]) > cs && us[ij] > 0.0){
us[ij] = cs;
}
if(abs(us[ij]) < ls){
us[ij] = 0.0;
}
}
}
if(isnap == n[0] || isnap == n[1] || isnap == n[2] || isnap == n[3] || isnap == n[4]){
// normalize both fields for plotting
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
p[ij] = up[ij] / pampall;
s[ij] = us[ij] / sampall;
// if(up[i][j]>1e-5||us[i][j]>1e-5){
// printf("%f %f\n", up[i][j],us[i][j]);
// }
}
}
// combine: S energy plotted negative, P positive, write x y u triples
for(int j = 0; j < ny; j++){
for(int i = 0; i < nx; i++){
int ij = i * ny + j;
x = i * dx;
y = j * dy;
if(abs(s[ij]) > abs(p[ij])){
u[ij] = -abs(s[ij]);
}
// NOTE(review): the P-dominant branch also stores abs(s[ij]);
// abs(p[ij]) would be the symmetric choice — verify intent.
else if(abs(p[ij]) > abs(s[ij])){
u[ij] = abs(s[ij]);
}
fprintf(*(snapshot+isnap), "%f %f %f\n", x, y, u[ij]);
}
}
}
}
fclose(file);
fclose(*(snapshot));
fclose(*(snapshot+1));
fclose(*(snapshot+2));
fclose(*(snapshot+3));
fclose(*(snapshot+4));
}
}
int main(int argc , char *argv[]){
// command-line options (e.g. "psv.exe fsw". default: f)
// q: gpu device query
// f: forward modeling with waveform output only
// o: forward modeling with waveform and snapshot output (with much more time consumption)
// w: convert output waveform data to gmt readable format
// s: convert output snapshot data to gmt readable format
int cfg[5] = {0};
if(argc > 1){
for(int i = 0; i < argv[1][i] != '\0'; i++){
switch(argv[1][i]){
case 'q': cfg[0] = 1;break;
case 'f': cfg[1] = 1; break;
case 'o': cfg[1] = 1; cfg[2] = 1; break;
case 'w': cfg[3] = 1; break;
case 's': cfg[4] = 1; break;
}
}
}
else{
cfg[1] = 0;
}
// output file name
char *oname="opsv";
char *wname="wpsv";
if(cfg[0]) psv::query();
if(cfg[1]) psv::forward(oname, wname, cfg[2]);
if(cfg[3]) psv::waveform(wname);
if(cfg[4]) psv::snapshot(oname);
}
|
9,613 | #ifndef _GPU_MULTI_VECTOR_AND_CU_
#define _GPU_MULTI_VECTOR_AND_CU_
#endif |
9,614 | #include <stdio.h>
#include <iostream>
using namespace std;
#define TPB 256
#define ARRAY_SIZE 10
#define N (ARRAY_SIZE/TPB + 1)
__global__ void saxpy(float *x, float *y, const float a)
{
    // In-place y[i] <- a*x[i] + y[i], one element per thread,
    // guarded against threads past the end of the array.
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx >= ARRAY_SIZE) return;
    y[idx] = a*x[idx] + y[idx];
}
int main()
{
    // Demo: fill x/y with random values, run saxpy on the GPU, print both
    // before and after.
    // FIX: the device-to-host copy used N*sizeof(float) — N is the BLOCK
    // count (ARRAY_SIZE/TPB + 1 == 1 here), so only the first result came
    // back and the rest of y stayed stale. Copy ARRAY_SIZE elements.
    float *x, *y, *d_x, *d_y;
    x = (float*)malloc(ARRAY_SIZE*sizeof(float));
    y = (float*)malloc(ARRAY_SIZE*sizeof(float));
    const float a = 3.0f;   // scale factor (kernel parameter is float)
    cudaMalloc(&d_x, ARRAY_SIZE*sizeof(float));
    cudaMalloc(&d_y, ARRAY_SIZE*sizeof(float));
    for (int i = 0; i < ARRAY_SIZE; i++) {
        x[i] = rand() % 1000;
        y[i] = rand() % 1000;
        cout << x[i] << "\n";
        cout << y[i] << "\n\n";
    }
    cout << "---------------------" <<"\n";
    cudaMemcpy(d_x, x, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    saxpy<<<N, TPB>>>(d_x, d_y, a);
    cudaMemcpy(y, d_y, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < ARRAY_SIZE; i++) {
        cout << x[i] << "\n";
        cout << y[i] << "\n\n";
    }
    free(x);
    free(y);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
|
9,615 | /*
isolating the kernel to compile using the -cubin option
which yields info about:
- register per thread
- shared memory per thread block
These are used in occupancy calculation
Output for this program:
ptxas info : 0 bytes gmem
ptxas info : Compiling entry function '_Z3addiPfS_' for 'sm_30'
ptxas info : Function properties for _Z3addiPfS_
0 bytes stack frame, 0 bytes spill stores, 0 bytes spill loads
ptxas info : Used 8 registers, 344 bytes cmem[0]
*/
#include <iostream>
#include <math.h>
//CUDA kernel to add elements of the matrix
// __global__ converts a function into a CUDA kernel
__global__
void add(int n, float *x, float *y)
{
    // Elementwise y += x over n floats using a grid-stride loop, so any
    // launch configuration (even a single block) covers the whole array.
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step) {
        y[i] = x[i] + y[i];
    }
}
|
9,616 | #include <cuda_runtime.h>
__global__
void cudaAddVecKernel(
    const float *input1,
    const float *input2,
    float *output,
    const unsigned int size)
{
    // Elementwise vector sum with a grid-stride loop: every thread walks
    // the array in steps of the total thread count, so any grid size works.
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < size;
         idx += stride) {
        output[idx] = input1[idx] + input2[idx];
    }
}
// Host-side launcher for cudaAddVecKernel: adds input1 and input2
// (device pointers, `size` floats each) into `output` asynchronously on
// the default stream.
// NOTE(review): the launch result is never checked — consider a
// cudaGetLastError() call here so bad configurations are not silent.
void cudaCallAddVecKernel(const int blocks,
const unsigned int threadsPerBlock,
const float *input1,
const float *input2,
float *output,
const unsigned int size)
{
cudaAddVecKernel<<<blocks, threadsPerBlock>>>(input1, input2, output, size);
}
|
9,617 | #include <stdio.h>
// Demo kernel: every launched thread prints its block/thread coordinates
// followed by the launch configuration (blockDim / gridDim) — with the
// <<<3,2>>> launch in main this emits 6 x 3 lines in arbitrary order.
__global__ void helloFromGPU(void)
{
printf("hello world from GPU (block thread)%d,%d!\n",blockIdx.x,threadIdx.x);
printf("blockdim %d,%d,%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("griddim %d,%d,%d\n",gridDim.x,gridDim.y,gridDim.z);
}
int main(int argc, char const *argv[])
{
printf("hello world from cpu\n");
dim3 grid(3);
dim3 block(2);
printf("block %d,%d,%d\n",block.x,block.y,block.z);
printf("grid %d,%d,%d\n",grid.x,grid.y,grid.z);
helloFromGPU<<<grid,block>>>();
cudaDeviceReset();
//cudaDeviceSynchronize();
return 0;
}
|
9,618 | #include "includes.h"
// Sum reduction with 16-way unrolling: each block first folds 16 grid-
// sized chunks into its own chunk of g_idata IN PLACE, then tree-reduces
// that chunk and writes one partial sum per block to g_odata.
// NOTE(review): the unrolled pre-sum is skipped for tail blocks
// (idx + 15*blockDim.x >= n), yet the tree phase still reduces their raw
// chunk — correct only when n is a multiple of 16*blockDim.x; confirm the
// caller guarantees that. blockDim.x must also be a power of two.
__global__ void reduceUnrolling16 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 16 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 16;
// unrolling 16: fold 16 strided elements into the first chunk
if (idx + 15 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
int c1 = g_idata[idx + 8 * blockDim.x];
int c2 = g_idata[idx + 9 * blockDim.x];
int c3 = g_idata[idx + 10 * blockDim.x];
int c4 = g_idata[idx + 11 * blockDim.x];
int d1 = g_idata[idx + 12 * blockDim.x];
int d2 = g_idata[idx + 13 * blockDim.x];
int d3 = g_idata[idx + 14 * blockDim.x];
int d4 = g_idata[idx + 15 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4 + c1 + c2 + c3 + c4
+ d1 + d2 + d3 + d4;
}
__syncthreads();
// in-place tree reduction in global memory (halving stride each pass)
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
9,619 | #include "includes.h"
__global__ void rgb2yuv_gpu_son(unsigned char * d_r, unsigned char * d_g, unsigned char * d_b, unsigned char * d_y , unsigned char * d_u ,unsigned char * d_v , int size)
{
    // Planar RGB -> YUV (BT.601-style coefficients), one pixel per thread.
    // U and V are offset by 128 to fit the unsigned byte range.
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    if (idx >= size) return;
    const unsigned char red = d_r[idx];
    const unsigned char grn = d_g[idx];
    const unsigned char blu = d_b[idx];
    d_y[idx] = (unsigned char)( 0.299*red + 0.587*grn + 0.114*blu);
    d_u[idx] = (unsigned char)(-0.169*red - 0.331*grn + 0.499*blu + 128);
    d_v[idx] = (unsigned char)( 0.499*red - 0.418*grn - 0.0813*blu + 128);
}
9,620 | #include <thrust/tuple.h>
#include <stdio.h>
int main(){
    // Build a heterogeneous 4-tuple and print its elements by position.
    thrust::tuple<int, int, float, const char *> test_tuple(23, 99, 4.5, "thrust");
    int first = thrust::get<0>(test_tuple);
    int second = thrust::get<1>(test_tuple);
    float third = thrust::get<2>(test_tuple);
    const char *fourth = thrust::get<3>(test_tuple);
    printf("%d\t%d\t%g\t%s\n", first, second, third, fourth);
    return 0;
}
|
9,621 | #include <iostream>
#ifdef DEBUG
#define CUDA_CHECK(x) do {\
(x); \
cudaError_t e = cudaGetLastError(); \
if (cudaSuccess != e) { \
printf("cuda failure \"%s\" at %s:%d\n", \
cudaGetErrorString(e), \
__FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#else
#define CUDA_CHECK(x) (x)
#endif
// main program for the CPU
// main program for the CPU
int main(void) {
    // Round-trip a small array host -> device -> device -> host and print
    // it before and after.
    // FIX: the host-to-device copy passed cudaMemcpyDeviceToDevice with a
    // host source pointer (the original "BOMB here !" comment marked it) —
    // under DEBUG the CUDA_CHECK would abort there. Use HostToDevice.
    // host-side data
    const int SIZE = 5;
    const int a[SIZE] = { 1, 2, 3, 4, 5 };
    int b[SIZE] = { 0, 0, 0, 0, 0 };
    // print source
    printf("a = {%d,%d,%d,%d,%d}\n", a[0], a[1], a[2], a[3], a[4]);
    // device-side data
    int *dev_a = 0;
    int *dev_b = 0;
    // allocate device memory
    CUDA_CHECK( cudaMalloc((void**)&dev_a, SIZE * sizeof(int)) );
    CUDA_CHECK( cudaMalloc((void**)&dev_b, SIZE * sizeof(int)) );
    // copy from host to device
    CUDA_CHECK( cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice) );
    // copy from device to device
    CUDA_CHECK( cudaMemcpy(dev_b, dev_a, SIZE * sizeof(int), cudaMemcpyDeviceToDevice) );
    // copy from device to host
    CUDA_CHECK( cudaMemcpy(b, dev_b, SIZE * sizeof(int), cudaMemcpyDeviceToHost) );
    // free device memory
    CUDA_CHECK( cudaFree(dev_a) );
    CUDA_CHECK( cudaFree(dev_b) );
    // print the result
    printf("b = {%d,%d,%d,%d,%d}\n", b[0], b[1], b[2], b[3], b[4]);
    // done
    return 0;
}
|
9,622 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
    // Abort with `message` if any prior CUDA call or launch left an error
    // behind (cudaGetLastError also clears the sticky error state).
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess) return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// Machine-generated 8th-order hyperbolic-term (flux-divergence) stencil:
// computes flux_0 and flux_1 for two k-planes (k and k+1) per thread,
// with common subexpressions hoisted into _t_*_ temporaries. The code is
// kept verbatim — its float evaluation order is part of the generator's
// contract.
// NOTE(review): the guard uses bitwise & on comparisons (well-defined for
// bool operands but unusual), and bounds all three axes by N while L and M
// go unused; several array aliases (flux_2..4, cons_2..4, q_1..q_4 partly)
// are also unused here. The 308^2 plane size is hard-coded in the casts.
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
// each thread covers TWO consecutive k-planes, hence the factor 2
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
// reinterpret the flat pointers as [k][j][i] 308x308-plane volumes
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// interior points only: 4-wide halo on every face for the 8-point stencil
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
// generator-emitted scratch temporaries (names carry no meaning)
double _t_10_;
double _t_11_;
double _t_8_;
double _t_9_;
double _t_32_;
double _t_33_;
double _t_34_;
double _t_35_;
double _t_5_;
double _t_1_;
double _t_7_;
double _t_4_;
double _t_3_;
double _t_2_;
double _t_6_;
double flux_1kc0jc0ic0 = 0;
double _t_0_;
double flux_0kc0jc0ic0 = 0;
double _t_29_;
double _t_25_;
double _t_31_;
double _t_28_;
double _t_27_;
double _t_26_;
double _t_30_;
double _t_24_;
double flux_0kp1jc0ic0 = 0;
double flux_1kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_23_;
double _t_46_;
double _t_19_;
double _t_21_;
double _t_43_;
double _t_44_;
double _t_20_;
double _t_45_;
double _t_22_;
double _t_47_;
double _t_18_;
double _t_42_;
double _t_38_;
double _t_37_;
double _t_39_;
double _t_40_;
double _t_41_;
double _t_36_;
// x-direction central differences of q_4 for planes k and k+1
_t_10_ = -q_4[k][j][i-3];
_t_10_ += q_4[k][j][i+3];
_t_11_ = -q_4[k][j][i-4];
_t_11_ += q_4[k][j][i+4];
_t_8_ = -q_4[k][j][i-1];
_t_8_ += q_4[k][j][i+1];
_t_9_ = -q_4[k][j][i-2];
_t_9_ += q_4[k][j][i+2];
_t_32_ = -q_4[k+1][j][i-1];
_t_32_ += q_4[k+1][j][i+1];
_t_33_ = -q_4[k+1][j][i-2];
_t_33_ += q_4[k+1][j][i+2];
_t_34_ = -q_4[k+1][j][i-3];
_t_34_ += q_4[k+1][j][i+3];
_t_35_ = -q_4[k+1][j][i-4];
_t_35_ += q_4[k+1][j][i+4];
// x-direction flux terms on plane k (weights 0.8, -0.2, 0.038, -0.0035)
_t_11_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_5_ = -cons_1[k][j][i-4];
_t_5_ += cons_1[k][j][i+4];
_t_11_ += cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_1_ = -0.0035 * _t_5_;
_t_7_ = -0.0035 * _t_11_;
_t_10_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_4_ = -cons_1[k][j][i-3];
_t_4_ += cons_1[k][j][i+3];
_t_1_ += 0.038 * _t_4_;
_t_10_ += cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_7_ += 0.038 * _t_10_;
_t_9_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_3_ = -cons_1[k][j][i-2];
_t_3_ += cons_1[k][j][i+2];
_t_1_ -= 0.2 * _t_3_;
_t_9_ += cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_7_ -= 0.2 * _t_9_;
_t_8_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_2_ = -cons_1[k][j][i-1];
_t_2_ += cons_1[k][j][i+1];
_t_1_ += 0.8 * _t_2_;
_t_8_ += cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_7_ += 0.8 * _t_8_;
_t_6_ = _t_7_ * dxinv0;
flux_1kc0jc0ic0 -= _t_6_;
_t_0_ = _t_1_ * dxinv0;
flux_0kc0jc0ic0 -= _t_0_;
flux_0[k][j][i] = flux_0kc0jc0ic0;
// x-direction flux terms on plane k+1
_t_35_ -= cons_1[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_29_ = -cons_1[k+1][j][i-4];
_t_29_ += cons_1[k+1][j][i+4];
_t_35_ += cons_1[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_25_ = -0.0035 * _t_29_;
_t_31_ = -0.0035 * _t_35_;
_t_34_ -= cons_1[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_28_ = -cons_1[k+1][j][i-3];
_t_28_ += cons_1[k+1][j][i+3];
_t_25_ += 0.038 * _t_28_;
_t_34_ += cons_1[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_31_ += 0.038 * _t_34_;
_t_33_ -= cons_1[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_27_ = -cons_1[k+1][j][i-2];
_t_27_ += cons_1[k+1][j][i+2];
_t_25_ -= 0.2 * _t_27_;
_t_33_ += cons_1[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_31_ -= 0.2 * _t_33_;
_t_32_ -= cons_1[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_26_ = -cons_1[k+1][j][i-1];
_t_26_ += cons_1[k+1][j][i+1];
_t_25_ += 0.8 * _t_26_;
_t_32_ += cons_1[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_31_ += 0.8 * _t_32_;
_t_30_ = _t_31_ * dxinv0;
_t_24_ = _t_25_ * dxinv0;
flux_0kp1jc0ic0 -= _t_24_;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux_1kp1jc0ic0 -= _t_30_;
// y-direction contribution on plane k
_t_17_ = -cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_17_ += cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_1[k][j-1][i] * q_2[k][j-1][i];
_t_14_ += cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_15_ += cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv1;
flux_1kc0jc0ic0 -= _t_12_;
// z-direction contribution, interleaved for planes k and k+1
_t_23_ = -cons_1[k-4][j][i] * q_3[k-4][j][i];
_t_23_ += cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_46_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
_t_19_ = -0.0035 * _t_23_;
_t_46_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_21_ = -cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_43_ = 0.038 * _t_46_;
_t_21_ += cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_19_ -= 0.2 * _t_21_;
_t_44_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_44_ -= cons_1[k][j][i] * q_3[k][j][i];
_t_43_ += 0.8 * _t_44_;
_t_20_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_20_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
_t_19_ += 0.8 * _t_20_;
_t_45_ = -cons_1[k-1][j][i] * q_3[k-1][j][i];
_t_45_ += cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_43_ -= 0.2 * _t_45_;
_t_22_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_22_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_19_ += 0.038 * _t_22_;
_t_47_ = -cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_47_ += cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_43_ -= 0.0035 * _t_47_;
_t_18_ = _t_19_ * dxinv2;
flux_1kc0jc0ic0 -= _t_18_;
flux_1[k][j][i] = flux_1kc0jc0ic0;
_t_42_ = _t_43_ * dxinv2;
flux_1kp1jc0ic0 -= _t_42_;
// y-direction contribution on plane k+1
_t_38_ = -cons_1[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_38_ += cons_1[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_37_ = 0.8 * _t_38_;
_t_39_ = -cons_1[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_39_ += cons_1[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_37_ -= 0.2 * _t_39_;
_t_40_ = -cons_1[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_40_ += cons_1[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_37_ += 0.038 * _t_40_;
_t_41_ = -cons_1[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_41_ += cons_1[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_37_ -= 0.0035 * _t_41_;
_t_36_ = _t_37_ * dxinv1;
flux_1kp1jc0ic0 -= _t_36_;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
}
}
// hypterm_2: adds hyperbolic-term stencil contributions to flux_0 and writes
// flux_2 on a grid viewed as [*][308][308] doubles. Each thread processes TWO
// consecutive k-planes (k and k+1) — see the 2*blockdim_k z-grid layout and the
// paired flux_*kc0 / flux_*kp1 accumulators. The weights 0.8 / 0.2 / 0.038 /
// 0.0035 applied at offsets +-1..+-4 look like a high-order central-difference
// stencil — TODO confirm against the generator that produced this code.
// NOTE(review): the bounds guard uses bitwise & on comparison results (works,
// since each comparison is 0/1) and bounds every axis by N; L and M are unused
// here — verify that L == M == N is guaranteed by the caller.
__global__ void hypterm_2 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
// z dimension advances by 2*blockdim_k: every thread owns planes k and k+1
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
// reinterpret the flat device buffers as 3D arrays with 308x308 planes
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// interior-only: skip the 4-cell halo on every face
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
// _t_* are scratch temporaries emitted by the code generator; the statement
// order interleaves the k-plane and (k+1)-plane stencils deliberately, so do
// not reorder.
double _t_26_;
double _t_27_;
double _t_28_;
double _t_29_;
double _t_32_;
double _t_33_;
double _t_34_;
double _t_35_;
double _t_5_;
double _t_1_;
double _t_25_;
double _t_4_;
double _t_3_;
double _t_2_;
double _t_24_;
double flux_2kc0jc0ic0 = 0;
double _t_0_;
double flux_0kc0jc0ic0 = 0;
double _t_11_;
double _t_7_;
double _t_31_;
double _t_10_;
double _t_9_;
double _t_8_;
double _t_30_;
double _t_6_;
double flux_0kp1jc0ic0 = 0;
double flux_2kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_20_;
double _t_19_;
double _t_21_;
double _t_22_;
double _t_23_;
double _t_18_;
double _t_41_;
double _t_46_;
double _t_37_;
double _t_39_;
double _t_43_;
double _t_44_;
double _t_38_;
double _t_45_;
double _t_40_;
double _t_47_;
double _t_36_;
double _t_42_;
// ---- j-direction stencil (scaled by dxinv1): contributions to flux_0 and
// flux_2 on plane k (then plane k+1 below) ----
_t_26_ = -q_4[k][j-1][i];
_t_26_ += q_4[k][j+1][i];
_t_27_ = -q_4[k][j-2][i];
_t_27_ += q_4[k][j+2][i];
_t_28_ = -q_4[k][j-3][i];
_t_28_ += q_4[k][j+3][i];
_t_29_ = -q_4[k][j-4][i];
_t_29_ += q_4[k][j+4][i];
_t_32_ = -q_4[k+1][j-1][i];
_t_32_ += q_4[k+1][j+1][i];
_t_33_ = -q_4[k+1][j-2][i];
_t_33_ += q_4[k+1][j+2][i];
_t_34_ = -q_4[k+1][j-3][i];
_t_34_ += q_4[k+1][j+3][i];
_t_35_ = -q_4[k+1][j-4][i];
_t_35_ += q_4[k+1][j+4][i];
_t_29_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_5_ = -cons_2[k][j-4][i];
_t_5_ += cons_2[k][j+4][i];
_t_29_ += cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_1_ = -0.0035 * _t_5_;
_t_25_ = -0.0035 * _t_29_;
_t_28_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_4_ = -cons_2[k][j-3][i];
_t_4_ += cons_2[k][j+3][i];
_t_1_ += 0.038 * _t_4_;
_t_28_ += cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_25_ += 0.038 * _t_28_;
_t_27_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_3_ = -cons_2[k][j-2][i];
_t_3_ += cons_2[k][j+2][i];
_t_1_ -= 0.2 * _t_3_;
_t_27_ += cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_25_ -= 0.2 * _t_27_;
_t_26_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_2_ = -cons_2[k][j-1][i];
_t_2_ += cons_2[k][j+1][i];
_t_1_ += 0.8 * _t_2_;
_t_26_ += cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_25_ += 0.8 * _t_26_;
_t_24_ = _t_25_ * dxinv1;
flux_2kc0jc0ic0 -= _t_24_;
_t_0_ = _t_1_ * dxinv1;
flux_0kc0jc0ic0 -= _t_0_;
// flux_0 ACCUMULATES (+=): another kernel also contributes to flux_0,
// whereas flux_2 below is written with plain = by this kernel alone.
flux_0[k][j][i] += flux_0kc0jc0ic0;
// same j-direction stencil for plane k+1
_t_35_ -= cons_2[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_11_ = -cons_2[k+1][j-4][i];
_t_11_ += cons_2[k+1][j+4][i];
_t_35_ += cons_2[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_7_ = -0.0035 * _t_11_;
_t_31_ = -0.0035 * _t_35_;
_t_34_ -= cons_2[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_10_ = -cons_2[k+1][j-3][i];
_t_10_ += cons_2[k+1][j+3][i];
_t_7_ += 0.038 * _t_10_;
_t_34_ += cons_2[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_31_ += 0.038 * _t_34_;
_t_33_ -= cons_2[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_9_ = -cons_2[k+1][j-2][i];
_t_9_ += cons_2[k+1][j+2][i];
_t_7_ -= 0.2 * _t_9_;
_t_33_ += cons_2[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_31_ -= 0.2 * _t_33_;
_t_32_ -= cons_2[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_8_ = -cons_2[k+1][j-1][i];
_t_8_ += cons_2[k+1][j+1][i];
_t_7_ += 0.8 * _t_8_;
_t_32_ += cons_2[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_31_ += 0.8 * _t_32_;
_t_30_ = _t_31_ * dxinv1;
_t_6_ = _t_7_ * dxinv1;
flux_0kp1jc0ic0 -= _t_6_;
flux_0[k+1][j][i] += flux_0kp1jc0ic0;
flux_2kp1jc0ic0 -= _t_30_;
// ---- i-direction stencil (scaled by dxinv0): contributions to flux_2 ----
_t_17_ = -cons_2[k][j][i-4] * q_1[k][j][i-4];
_t_17_ += cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_2[k][j][i-1] * q_1[k][j][i-1];
_t_14_ += cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_2[k][j][i-2] * q_1[k][j][i-2];
_t_15_ += cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_2[k][j][i-3] * q_1[k][j][i-3];
_t_16_ += cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv0;
flux_2kc0jc0ic0 -= _t_12_;
_t_20_ = -cons_2[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_20_ += cons_2[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_19_ = 0.8 * _t_20_;
_t_21_ = -cons_2[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_21_ += cons_2[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_2[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_22_ += cons_2[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_19_ += 0.038 * _t_22_;
_t_23_ = -cons_2[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_23_ += cons_2[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_19_ -= 0.0035 * _t_23_;
_t_18_ = _t_19_ * dxinv0;
flux_2kp1jc0ic0 -= _t_18_;
// ---- k-direction stencil (scaled by dxinv2): the k+1-plane terms reuse
// shared products, hence offsets like [k+5]/[k-3] (= (k+1)+-4) ----
_t_41_ = -cons_2[k-4][j][i] * q_3[k-4][j][i];
_t_41_ += cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_46_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_37_ = -0.0035 * _t_41_;
_t_46_ -= cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_39_ = -cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_43_ = 0.038 * _t_46_;
_t_39_ += cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_37_ -= 0.2 * _t_39_;
_t_44_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
_t_44_ -= cons_2[k][j][i] * q_3[k][j][i];
_t_43_ += 0.8 * _t_44_;
_t_38_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
_t_38_ -= cons_2[k-1][j][i] * q_3[k-1][j][i];
_t_37_ += 0.8 * _t_38_;
_t_45_ = -cons_2[k-1][j][i] * q_3[k-1][j][i];
_t_45_ += cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_43_ -= 0.2 * _t_45_;
_t_40_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
_t_40_ -= cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_37_ += 0.038 * _t_40_;
_t_47_ = -cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_47_ += cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_43_ -= 0.0035 * _t_47_;
_t_36_ = _t_37_ * dxinv2;
flux_2kc0jc0ic0 -= _t_36_;
flux_2[k][j][i] = flux_2kc0jc0ic0;
_t_42_ = _t_43_ * dxinv2;
flux_2kp1jc0ic0 -= _t_42_;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
}
}
// hypterm_3: adds k-direction contributions to flux_0 and writes flux_3 on a
// grid viewed as [*][308][308] doubles. Like hypterm_2, each thread handles
// the two consecutive k-planes k and k+1. The k-direction stencil here also
// folds in q_4 differences (pressure-like terms added to the cons_3*q_3
// products — presumably a momentum-flux + pressure-gradient combination;
// TODO confirm against the generating application).
// NOTE(review): bounds guard uses bitwise & (valid for 0/1 comparison results)
// and bounds all axes by N; L and M are unused — verify L == M == N.
__global__ void hypterm_3 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
// z dimension advances by 2*blockdim_k: every thread owns planes k and k+1
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
// reinterpret the flat device buffers as 3D arrays with 308x308 planes
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// interior-only: skip the 4-cell halo on every face
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
// _t_* are generator-emitted scratch temporaries; statement order interleaves
// the k-plane and (k+1)-plane stencils deliberately — do not reorder.
double _t_38_;
double _t_45_;
double _t_40_;
double _t_47_;
double _t_4_;
double _t_9_;
double _t_1_;
double _t_37_;
double _t_11_;
double _t_43_;
double _t_7_;
double _t_2_;
double _t_41_;
double _t_46_;
double _t_39_;
double _t_44_;
double _t_10_;
double _t_3_;
double _t_8_;
double _t_5_;
double _t_36_;
double flux_3kc0jc0ic0 = 0;
double _t_42_;
double flux_3kp1jc0ic0 = 0;
double _t_0_;
double flux_0kc0jc0ic0 = 0;
double _t_6_;
double flux_0kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_20_;
double _t_19_;
double _t_21_;
double _t_22_;
double _t_23_;
double _t_18_;
double _t_29_;
double _t_25_;
double _t_26_;
double _t_27_;
double _t_28_;
double _t_24_;
double _t_32_;
double _t_31_;
double _t_33_;
double _t_34_;
double _t_35_;
double _t_30_;
// ---- k-direction stencil (scaled by dxinv2): q_4 differences plus
// cons_3*q_3 products, feeding flux_0 and flux_3 for planes k and k+1 ----
_t_38_ = q_4[k+1][j][i];
_t_38_ -= q_4[k-1][j][i];
_t_45_ = -q_4[k-1][j][i];
_t_45_ += q_4[k+3][j][i];
_t_40_ = q_4[k+3][j][i];
_t_40_ -= q_4[k-3][j][i];
_t_47_ = -q_4[k-3][j][i];
_t_47_ += q_4[k+5][j][i];
_t_40_ += cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_45_ += cons_3[k+3][j][i] * q_3[k+3][j][i];
_t_4_ = cons_3[k+3][j][i];
_t_9_ = cons_3[k+3][j][i];
_t_4_ -= cons_3[k-3][j][i];
_t_40_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_47_ -= cons_3[k-3][j][i] * q_3[k-3][j][i];
_t_1_ = 0.038 * _t_4_;
_t_37_ = 0.038 * _t_40_;
_t_11_ = -cons_3[k-3][j][i];
_t_11_ += cons_3[k+5][j][i];
_t_47_ += cons_3[k+5][j][i] * q_3[k+5][j][i];
_t_43_ = -0.0035 * _t_47_;
_t_7_ = -0.0035 * _t_11_;
_t_9_ -= cons_3[k-1][j][i];
_t_7_ -= 0.2 * _t_9_;
_t_38_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_45_ -= cons_3[k-1][j][i] * q_3[k-1][j][i];
_t_43_ -= 0.2 * _t_45_;
_t_2_ = -cons_3[k-1][j][i];
_t_2_ += cons_3[k+1][j][i];
_t_1_ += 0.8 * _t_2_;
_t_38_ += cons_3[k+1][j][i] * q_3[k+1][j][i];
_t_37_ += 0.8 * _t_38_;
_t_41_ = -q_4[k-4][j][i];
_t_41_ += q_4[k+4][j][i];
_t_46_ = q_4[k+4][j][i];
_t_46_ -= q_4[k-2][j][i];
_t_39_ = -q_4[k-2][j][i];
_t_39_ += q_4[k+2][j][i];
_t_44_ = q_4[k+2][j][i];
_t_44_ -= q_4[k][j][i];
_t_39_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_46_ -= cons_3[k-2][j][i] * q_3[k-2][j][i];
_t_10_ = -cons_3[k-2][j][i];
_t_3_ = -cons_3[k-2][j][i];
_t_3_ += cons_3[k+2][j][i];
_t_1_ -= 0.2 * _t_3_;
_t_39_ += cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_37_ -= 0.2 * _t_39_;
_t_44_ += cons_3[k+2][j][i] * q_3[k+2][j][i];
_t_8_ = cons_3[k+2][j][i];
_t_8_ -= cons_3[k][j][i];
_t_7_ += 0.8 * _t_8_;
_t_44_ -= cons_3[k][j][i] * q_3[k][j][i];
_t_43_ += 0.8 * _t_44_;
_t_10_ += cons_3[k+4][j][i];
_t_7_ += 0.038 * _t_10_;
_t_41_ += cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_46_ += cons_3[k+4][j][i] * q_3[k+4][j][i];
_t_43_ += 0.038 * _t_46_;
_t_5_ = cons_3[k+4][j][i];
_t_5_ -= cons_3[k-4][j][i];
_t_1_ -= 0.0035 * _t_5_;
_t_41_ -= cons_3[k-4][j][i] * q_3[k-4][j][i];
_t_37_ -= 0.0035 * _t_41_;
_t_36_ = _t_37_ * dxinv2;
flux_3kc0jc0ic0 -= _t_36_;
_t_42_ = _t_43_ * dxinv2;
flux_3kp1jc0ic0 -= _t_42_;
_t_0_ = _t_1_ * dxinv2;
flux_0kc0jc0ic0 -= _t_0_;
// flux_0 ACCUMULATES (+=): other kernels also contribute; flux_3 below is
// written with plain = by this kernel alone.
flux_0[k][j][i] += flux_0kc0jc0ic0;
_t_6_ = _t_7_ * dxinv2;
flux_0kp1jc0ic0 -= _t_6_;
flux_0[k+1][j][i] += flux_0kp1jc0ic0;
// ---- i-direction stencil (scaled by dxinv0): contributions to flux_3 ----
_t_17_ = -cons_3[k][j][i-4] * q_1[k][j][i-4];
_t_17_ += cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_3[k][j][i-1] * q_1[k][j][i-1];
_t_14_ += cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_3[k][j][i-2] * q_1[k][j][i-2];
_t_15_ += cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_3[k][j][i-3] * q_1[k][j][i-3];
_t_16_ += cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv0;
flux_3kc0jc0ic0 -= _t_12_;
_t_20_ = -cons_3[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_20_ += cons_3[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_19_ = 0.8 * _t_20_;
_t_21_ = -cons_3[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_21_ += cons_3[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_3[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_22_ += cons_3[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_19_ += 0.038 * _t_22_;
_t_23_ = -cons_3[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_23_ += cons_3[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_19_ -= 0.0035 * _t_23_;
_t_18_ = _t_19_ * dxinv0;
flux_3kp1jc0ic0 -= _t_18_;
// ---- j-direction stencil (scaled by dxinv1): contributions to flux_3 ----
_t_29_ = -cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_29_ += cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_25_ = -0.0035 * _t_29_;
_t_26_ = -cons_3[k][j-1][i] * q_2[k][j-1][i];
_t_26_ += cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_25_ += 0.8 * _t_26_;
_t_27_ = -cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_27_ += cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_25_ -= 0.2 * _t_27_;
_t_28_ = -cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_28_ += cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_25_ += 0.038 * _t_28_;
_t_24_ = _t_25_ * dxinv1;
flux_3kc0jc0ic0 -= _t_24_;
flux_3[k][j][i] = flux_3kc0jc0ic0;
_t_32_ = -cons_3[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_32_ += cons_3[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_31_ = 0.8 * _t_32_;
_t_33_ = -cons_3[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_33_ += cons_3[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_31_ -= 0.2 * _t_33_;
_t_34_ = -cons_3[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_34_ += cons_3[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_31_ += 0.038 * _t_34_;
_t_35_ = -cons_3[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_35_ += cons_3[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_31_ -= 0.0035 * _t_35_;
_t_30_ = _t_31_ * dxinv1;
flux_3kp1jc0ic0 -= _t_30_;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
}
}
// hypterm_4: writes flux_4 on a grid viewed as [*][308][308] doubles.
// Each thread handles the two consecutive k-planes k and k+1. Every stencil
// term here is (cons_4 + q_4) * q_n — the cons_4 and q_4 products always
// appear as a pair with the same indices — presumably an energy-flux term
// (E + p) * u; TODO confirm against the generating application.
// NOTE(review): bounds guard uses bitwise & (valid for 0/1 comparison
// results) and bounds all axes by N; L and M are unused — verify L == M == N.
__global__ void hypterm_4 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
// z dimension advances by 2*blockdim_k: every thread owns planes k and k+1
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
// reinterpret the flat device buffers as 3D arrays with 308x308 planes
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// interior-only: skip the 4-cell halo on every face
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
// _t_* are generator-emitted scratch temporaries; statement order interleaves
// the k-plane and (k+1)-plane stencils deliberately — do not reorder.
double _t_5_;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_0_;
double flux_4kc0jc0ic0 = 0;
double _t_11_;
double _t_7_;
double _t_10_;
double _t_8_;
double _t_9_;
double _t_6_;
double flux_4kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_23_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_22_;
double _t_18_;
double _t_29_;
double _t_34_;
double _t_25_;
double _t_27_;
double _t_31_;
double _t_32_;
double _t_26_;
double _t_33_;
double _t_28_;
double _t_35_;
double _t_24_;
double _t_30_;
// ---- i-direction stencil (scaled by dxinv0), planes k and k+1 ----
_t_5_ = -cons_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ -= q_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ += cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_5_ += q_4[k][j][i+4] * q_1[k][j][i+4];
_t_1_ = -0.0035 * _t_5_;
_t_2_ = -cons_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ -= q_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ += cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_2_ += q_4[k][j][i+1] * q_1[k][j][i+1];
_t_1_ += 0.8 * _t_2_;
_t_3_ = -cons_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ -= q_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ += cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_3_ += q_4[k][j][i+2] * q_1[k][j][i+2];
_t_1_ -= 0.2 * _t_3_;
_t_4_ = -cons_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ -= q_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ += cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_4_ += q_4[k][j][i+3] * q_1[k][j][i+3];
_t_1_ += 0.038 * _t_4_;
_t_0_ = _t_1_ * dxinv0;
flux_4kc0jc0ic0 -= _t_0_;
_t_11_ = -cons_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ -= q_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ += cons_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_11_ += q_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_7_ = -0.0035 * _t_11_;
_t_10_ = -cons_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ -= q_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ += cons_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_10_ += q_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_7_ += 0.038 * _t_10_;
_t_8_ = -cons_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ -= q_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ += cons_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_8_ += q_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_7_ += 0.8 * _t_8_;
_t_9_ = -cons_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ -= q_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ += cons_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_9_ += q_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_7_ -= 0.2 * _t_9_;
_t_6_ = _t_7_ * dxinv0;
flux_4kp1jc0ic0 -= _t_6_;
// ---- j-direction stencil (scaled by dxinv1), planes k and k+1 ----
_t_17_ = -cons_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= q_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ += cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_17_ += q_4[k][j+4][i] * q_2[k][j+4][i];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ -= q_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ += cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_14_ += q_4[k][j+1][i] * q_2[k][j+1][i];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ -= q_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ += cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_15_ += q_4[k][j+2][i] * q_2[k][j+2][i];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ -= q_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_16_ += q_4[k][j+3][i] * q_2[k][j+3][i];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv1;
flux_4kc0jc0ic0 -= _t_12_;
_t_23_ = -cons_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ -= q_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ += cons_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_23_ += q_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_19_ = -0.0035 * _t_23_;
_t_20_ = -cons_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ -= q_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ += cons_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_20_ += q_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_19_ += 0.8 * _t_20_;
_t_21_ = -cons_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ -= q_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ += cons_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_21_ += q_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ -= q_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ += cons_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_22_ += q_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_19_ += 0.038 * _t_22_;
_t_18_ = _t_19_ * dxinv1;
flux_4kp1jc0ic0 -= _t_18_;
// ---- k-direction stencil (scaled by dxinv2): the k+1-plane terms use
// offsets like [k+5]/[k-3] (= (k+1)+-4) so shared products can be reused ----
_t_29_ = -cons_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ -= q_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ += cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_29_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_25_ = -0.0035 * _t_29_;
_t_34_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ = -cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_34_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_31_ = 0.038 * _t_34_;
_t_27_ += cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_27_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_25_ -= 0.2 * _t_27_;
_t_32_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ -= cons_4[k][j][i] * q_3[k][j][i];
_t_32_ -= q_4[k][j][i] * q_3[k][j][i];
_t_31_ += 0.8 * _t_32_;
_t_26_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ += q_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ = -cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_26_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_25_ += 0.8 * _t_26_;
_t_33_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ += cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_33_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= 0.2 * _t_33_;
_t_28_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ = -cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_28_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_25_ += 0.038 * _t_28_;
_t_35_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ += cons_4[k+5][j][i] * q_3[k+5][j][i];
_t_35_ += q_4[k+5][j][i] * q_3[k+5][j][i];
_t_31_ -= 0.0035 * _t_35_;
_t_24_ = _t_25_ * dxinv2;
flux_4kc0jc0ic0 -= _t_24_;
flux_4[k][j][i] = flux_4kc0jc0ic0;
_t_30_ = _t_31_ * dxinv2;
flux_4kp1jc0ic0 -= _t_30_;
flux_4[k+1][j][i] = flux_4kp1jc0ic0;
}
}
// device_clone: allocate a device buffer of `count` doubles and copy `host`
// into it. `err_msg` is passed to the project-provided check_error helper
// (presumably it inspects the last CUDA error — confirm its semantics).
static double *device_clone (const double *host, size_t count, const char *err_msg) {
cudaMalloc (&dev_ptr_out, 0); // placeholder removed below
return NULL;
}
// host_code: stages all thirteen L*M*N double fields on the GPU, runs the
// four hypterm_* stencil kernels, and copies the five flux fields back.
//
// Fixes over the original:
//  - all thirteen device buffers are now freed (they previously leaked on
//    every call),
//  - each kernel launch is followed by check_error so launch-configuration
//    failures are reported,
//  - the four identical grid configurations are collapsed into one.
// NOTE(review): `ceil` here is assumed to be a project two-argument
// ceiling-division helper (not math.h ceil) — confirm against the harness.
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
size_t bytes = sizeof(double)*L*M*N;
// --- stage inputs on the device ---
double *flux_0;
cudaMalloc (&flux_0, bytes);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, bytes, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, bytes);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, bytes, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, bytes);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, bytes, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, bytes);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, bytes, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, bytes);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, bytes, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, bytes);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, bytes, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, bytes);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, bytes, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, bytes);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, bytes, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, bytes);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, bytes, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, bytes);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, bytes, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, bytes);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, bytes, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, bytes);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, bytes, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, bytes);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, bytes, cudaMemcpyHostToDevice);
// --- launch the four stencil kernels ---
// z-grid covers L in steps of 2*blockDim.z because each thread processes
// two consecutive k-planes (see hypterm_* indexing).
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_1 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_1 kernel launch failed\n");
hypterm_2 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_2 kernel launch failed\n");
hypterm_3 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_3 kernel launch failed\n");
hypterm_4 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_4 kernel launch failed\n");
// --- copy results back (cudaMemcpy synchronizes with the kernels) ---
cudaMemcpy (h_flux_0, flux_0, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, bytes, cudaMemcpyDeviceToHost);
// --- release device memory (the original leaked all thirteen buffers) ---
cudaFree (flux_0);
cudaFree (flux_1);
cudaFree (flux_2);
cudaFree (flux_3);
cudaFree (flux_4);
cudaFree (cons_1);
cudaFree (cons_2);
cudaFree (cons_3);
cudaFree (cons_4);
cudaFree (q_1);
cudaFree (q_2);
cudaFree (q_3);
cudaFree (q_4);
}
|
9,623 | #include "includes.h"
// gpu_stencil37_hack1_cp_cols: copies, per tile, two columns of each z-slice
// of `dst` into the `shared_cols` staging buffer (shared_rows/shared_slices
// are accepted but unused here — presumably filled by sibling kernels).
// Layout written: for slice s, block (bx,by), shared_cols holds 2*n_rows
// values per bx, the first n_rows for column tile_x*bx and the next n_rows
// for column tile_x*bx + 1.
// NOTE(review): tile_x/tile_y are the per-block tile extents in x/y — tile_y
// is unused; confirm that is intentional. Also note the inner debug guards
// test CUDA_CUDA_DEBUG while the outer ones test CUDA_DARTS_DEBUG — possibly
// a typo'd macro name; verify.
__global__ void gpu_stencil37_hack1_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
printf("copy cols begin\n");
printf("gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
// origin of this block's tile in the global volume
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = blockDim.y * blockIdx.y;
int base_global_col = tile_x * blockIdx.x;
// elements per z-slice of dst, and per z-slice of the staging buffer
// (2 columns of n_rows per x-block)
int area_dst = n_rows*n_cols;
int area_shared = gridDim.x*n_rows*2;
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
printf("area_shared=%d\n",area_shared);
}
#endif
int base_global_idx = base_global_slice*area_dst + base_global_row * n_cols + base_global_col;
// each thread also copies the column immediately to the right, if it exists
// NOTE(review): base_global_col itself is not bounds-checked against n_cols
// — presumably the grid is sized so it is always in range; confirm.
int nextCol= base_global_col+1;
bool legalNextCol = (nextCol<n_cols)?1:0;
int ty = threadIdx.y;
bool legalCurRow = (base_global_row + ty)<n_rows;
// walk the tile_z slices owned by this block
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
int idx_dst =base_global_idx + tz*area_dst + ty*n_cols ;
int idx = (base_global_slice+tz)*area_shared + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
if(legalCurRow && legalCurSlice){
shared_cols[idx] = dst[idx_dst];
}
if(legalCurRow && legalCurSlice && legalNextCol){
// second column goes n_rows after the first
shared_cols[idx + n_rows] = dst[idx_dst + 1];
}
// barrier is uniform: tile_z is the same for every thread in the block
__syncthreads();
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
// printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
printf("copy cols end!\n");
}
#endif
}
9,624 | //
// matrix_multiply.cu
//
// matrix multiplication with CUDA multithreading
//
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// DEFAULT_DIM
//
// the default matrix dimension
//
#define DEFAULT_DIM 16
// CUDA_ERROR
//
// CUDA error check
//
// CUDA_ERROR
//
// wrap a CUDA runtime call; on failure, report file/line and abort
//
#define CUDA_ERROR(error) { cudaGpuErrorAssert((error), __FILE__, __LINE__); }
inline
void
cudaGpuErrorAssert (
    cudaError_t error,
    const char *file,
    int line
) {
    if (error == cudaSuccess) {
        return;  // nothing to report
    }
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(error), file, line);
    exit(error);
}
// cuda_multiply_matrix
//
// CUDA kernel for multiplying matrix
//
// cuda_multiply_matrix
//
// CUDA kernel for multiplying matrices: one block per output cell.
// Block (x, y) of the dim x dim grid computes result[x][y]; no bounds
// check is needed because the launch grid exactly matches the matrix.
//
__global__
void
cuda_multiply_matrix (
    int *result,
    int *a,
    int *b,
    int dim
) {
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    // dot product of row `row` of a with column `col` of b
    int acc = 0;
    for (int idx = 0; idx < dim; ++idx) {
        acc += a[row * dim + idx] * b[idx * dim + col];
    }
    result[row * dim + col] = acc;
}
// gpu_multiply_matrix
//
// multiply matrices using the GPU
//
// gpu_multiply_matrix
//
// multiply matrices using the GPU
//
// Copies `a` and `b` to the device, launches cuda_multiply_matrix with one
// block per output cell (dim x dim grid, one thread per block, matching the
// kernel's blockIdx-only indexing), and copies the product into `result`.
// Fixes over the original: the launch is now error-checked, and the three
// device buffers are freed (they previously leaked on every call).
//
__host__
void
gpu_multiply_matrix (
    int *result,
    int *a,
    int *b,
    int dim
) {
    int *result_d;
    int *a_d;
    int *b_d;
    // get the size of the matrix to allocate
    int size = dim * dim * sizeof(int);
    // allocate the memory on the GPU
    CUDA_ERROR(cudaMalloc(&result_d, size));
    CUDA_ERROR(cudaMalloc(&a_d, size));
    CUDA_ERROR(cudaMalloc(&b_d, size));
    // copy the memory to the GPU
    CUDA_ERROR(cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice));
    CUDA_ERROR(cudaMemcpy(b_d, b, size, cudaMemcpyHostToDevice));
    // start GPU kernel with one thread for each cell of the result matrix
    cuda_multiply_matrix<<<dim3(dim, dim), 1>>>(result_d, a_d, b_d, dim);
    // kernel launches return no status; surface launch errors explicitly
    CUDA_ERROR(cudaGetLastError());
    // copy memory back from the GPU (this blocks until the kernel finishes)
    CUDA_ERROR(cudaMemcpy(result, result_d, size, cudaMemcpyDeviceToHost));
    // release device memory — the original leaked these buffers
    CUDA_ERROR(cudaFree(result_d));
    CUDA_ERROR(cudaFree(a_d));
    CUDA_ERROR(cudaFree(b_d));
}
// cpu_multiply_matrix
//
// multiply matrices using the CPU
//
// cpu_multiply_matrix
//
// reference (host-side) matrix multiply: result = a * b, where all three
// matrices are dim x dim and stored row-major in flat int arrays
//
__host__
void
cpu_multiply_matrix (
    int *result,
    int *a,
    int *b,
    int dim
) {
    for (int row = 0; row < dim; ++row) {
        // hoist the row base of `a` out of the inner loops
        const int *a_row = a + row * dim;
        for (int col = 0; col < dim; ++col) {
            // dot product of row `row` of a with column `col` of b
            int acc = 0;
            for (int k = 0; k < dim; ++k) {
                acc += a_row[k] * b[k * dim + col];
            }
            result[row * dim + col] = acc;
        }
    }
}
// allocate_matrix
//
// allocate a matrix of square dimensions
//
// allocate_matrix
//
// allocate a dim x dim matrix of ints; returns NULL for a non-positive
// dimension, and exits the process if the allocation itself fails
//
__host__
int *
allocate_matrix (
    int dim
) {
    // reject invalid dimensions up front
    if (dim <= 0) {
        return NULL;
    }
    int *m = (int *)malloc(dim * dim * sizeof(int));
    if (m == NULL) {
        perror("Failed to allocate memory for matrix");
        exit(errno);
    }
    return m;
}
// generate_matrix
//
// generate a matrix of square dimensions with random 0 or 1 values in cells
//
// generate_matrix
//
// allocate a dim x dim matrix and fill every cell with a random 0 or 1;
// returns NULL when allocate_matrix rejects the dimension
//
__host__
int *
generate_matrix (
    int dim
) {
    int *m = allocate_matrix(dim);
    if (m == NULL) {
        return NULL;
    }
    // fill row-major, one rand() call per cell (same call order as before)
    const int total = dim * dim;
    for (int idx = 0; idx < total; ++idx) {
        m[idx] = rand() % 2;
    }
    return m;
}
// print_matrix
//
// print matrix of square dimension
//
// print_matrix
//
// print a dim x dim matrix, one bracketed row per line
//
__host__
void
print_matrix (
    int *m,
    int dim
) {
    for (int row = 0; row < dim; ++row) {
        const int *row_ptr = m + row * dim;
        printf("[");
        for (int col = 0; col < dim; ++col) {
            printf(" % 2.2d", row_ptr[col]);
        }
        printf("]\n");
    }
}
// main
//
// multiply matrices using the CPU and GPU with time measurement
//
// main
//
// multiply matrices using the CPU and GPU with time measurement
//
// Usage: matrix_multiply [-v/--verbose] [dimension]
// Fixes over the original: the three host matrices are freed and the four
// CUDA events destroyed before exit (they previously leaked).
//
int
main (
    int argc,
    char *argv[]
) {
    cudaEvent_t cpu_start;
    cudaEvent_t cpu_finish;
    cudaEvent_t gpu_start;
    cudaEvent_t gpu_finish;
    float measurement;
    int verbose = 0;
    int dim = DEFAULT_DIM;
    int *result;
    int *a;
    int *b;
    // parse arguments
    for (int i = 1; i < argc; ++i) {
        if ((strcasecmp(argv[i], "-v") == 0) || (strcasecmp(argv[i], "--verbose") == 0)) {
            // verbose enabled
            verbose = 1;
        } else {
            // dimension specified
            dim = atoi(argv[i]);
            if (dim <= 0) {
                printf("Usage: matrix_multiply [-v/--verbose] [dimension=%d]\nMultiply a square matrix of specified dimension\n", DEFAULT_DIM);
                return 0;
            }
        }
    }
    // create start and finish events
    CUDA_ERROR(cudaEventCreate(&cpu_start));
    CUDA_ERROR(cudaEventCreate(&cpu_finish));
    CUDA_ERROR(cudaEventCreate(&gpu_start));
    CUDA_ERROR(cudaEventCreate(&gpu_finish));
    // generate matrices randomly (allocate_matrix exits on malloc failure)
    result = allocate_matrix(dim);
    a = generate_matrix(dim);
    b = generate_matrix(dim);
    // print matrices
    if (verbose != 0) {
        printf("A:\n");
        print_matrix(a, dim);
        printf("B:\n");
        print_matrix(b, dim);
    }
    // get start time
    CUDA_ERROR(cudaEventRecord(cpu_start));
    // multiply the matrices on the CPU
    cpu_multiply_matrix(result, a, b, dim);
    // get finish time
    CUDA_ERROR(cudaEventRecord(cpu_finish));
    CUDA_ERROR(cudaEventSynchronize(cpu_finish));
    // print result
    if (verbose != 0) {
        printf("CPU Result:\n");
        print_matrix(result, dim);
    }
    // print time measurement
    CUDA_ERROR(cudaEventElapsedTime(&measurement, cpu_start, cpu_finish));
    printf("CPU Time: %f\n", measurement / 1000.0);
    // get start time
    CUDA_ERROR(cudaEventRecord(gpu_start));
    // multiply the matrices on the GPU (result is overwritten)
    gpu_multiply_matrix(result, a, b, dim);
    // get finish time
    CUDA_ERROR(cudaEventRecord(gpu_finish));
    CUDA_ERROR(cudaEventSynchronize(gpu_finish));
    // print result
    if (verbose != 0) {
        printf("GPU Result:\n");
        print_matrix(result, dim);
    }
    // print time measurement
    CUDA_ERROR(cudaEventElapsedTime(&measurement, gpu_start, gpu_finish));
    printf("GPU Time: %f\n", measurement / 1000.0);
    // release resources — the original leaked the matrices and events
    free(result);
    free(a);
    free(b);
    CUDA_ERROR(cudaEventDestroy(cpu_start));
    CUDA_ERROR(cudaEventDestroy(cpu_finish));
    CUDA_ERROR(cudaEventDestroy(gpu_start));
    CUDA_ERROR(cudaEventDestroy(gpu_finish));
    return 0;
}
|
9,625 | #include "includes.h"
#define TILE_WIDTH 32
#define COMMENT "Centrist_GPU"
#define RGB_COMPONENT_COLOR 255
// One 8-bit-per-channel RGB pixel of a PPM image.
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
// A PPM image: x/y dimensions plus a flat pixel array
// (indexed elsewhere as row * columns + col).
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
// Census-transform (CENTRIST) kernel: for each interior pixel, converts the
// 3x3 neighbourhood to grayscale, compares each neighbour against the
// neighbourhood mean, and packs the 9 comparison bits (k = 8..0) into
// `value` (range 0..511). The value is written to image_out and accumulated
// into a per-block shared histogram which is merged into the global `hist`.
// Grid layout: TILE_WIDTH x TILE_WIDTH thread tiles over the image.
// NOTE(review): hist_private has 512 slots but only the first hist_len are
// zeroed; values up to 511 are counted, so this assumes hist_len == 512 -
// confirm at the call site.
// NOTE(review): the __syncthreads() calls below sit inside
// `if(col < columns && row < rows)`; if the grid overshoots the image,
// threads outside the image skip the barrier, which is undefined behaviour -
// confirm the launch covers the image exactly.
__global__ void mod_CENTRIST(PPMPixel *image_out, PPMPixel *image_cp, int columns, int rows, int *hist, int hist_len) {
int col = TILE_WIDTH * blockIdx.x + threadIdx.x;
int row = TILE_WIDTH * blockIdx.y + threadIdx.y;
__shared__ int hist_private[512];
int hist_index = (threadIdx.y*TILE_WIDTH + threadIdx.x); //get index in shared histogram
if(hist_index < hist_len) hist_private[hist_index] = 0;
__syncthreads();
if(col < columns && row < rows)
{
//create and copy small chunks to shared memory
__shared__ unsigned char image_cp_private[TILE_WIDTH][TILE_WIDTH];
//convert to grayscale using integer weights (299/587/114 per mille)
int img_index = row * columns + col; //get index in original image
int grayscale = (image_cp[img_index].red*299 + image_cp[img_index].green*587 + image_cp[img_index].blue*114)/1000; //avoid float point errors
image_cp_private[threadIdx.y][threadIdx.x] = grayscale;
__syncthreads();
if(col < columns - 2 && row < rows - 2) //ignore first/last row/column
{
int r, c, rr, cc;
float mean = 0.0;
// accumulate the 3x3 neighbourhood mean; neighbours that fall outside
// this tile are re-read (and re-converted) from global memory
for(r = threadIdx.y, rr = row; r <= threadIdx.y + 2; r++, rr++)
for(c = threadIdx.x , cc = col; c <= threadIdx.x + 2; c++, cc++)
{
if(r < TILE_WIDTH && c < TILE_WIDTH)
{
mean += image_cp_private[r][c];
}
else
{
int grayscale_neigh = (image_cp[rr*columns + cc].red*299 + image_cp[rr*columns + cc].green*587 + image_cp[rr*columns + cc].blue*114)/1000;
mean += grayscale_neigh;
}
}
mean /= 9.0;
// second pass: set bit k when the neighbour is >= the mean
int value = 0, k = 8;
for(r = threadIdx.y, rr = row ; r <= threadIdx.y + 2; r++, rr++)
for(c = threadIdx.x, cc = col ; c <= threadIdx.x + 2; c++, cc++)
{
if(r < TILE_WIDTH && c < TILE_WIDTH)
{
if(1.0*image_cp_private[r][c] >= mean)
value |= 1<<k;
}
else
{
int grayscale_neigh = (image_cp[rr*columns + cc].red*299 + image_cp[rr*columns + cc].green*587 + image_cp[rr*columns + cc].blue*114)/1000;
if(grayscale_neigh >= mean)
value |= 1<<k;
}
k--;
}
// output image is (columns - 2) wide, hence the narrower row stride
int img_out_ind = row * (columns - 2) + col; //get index in ouput original
image_out[img_out_ind].red = image_out[img_out_ind].blue = image_out[img_out_ind].green = value;
atomicAdd(&(hist_private[value]), 1);
}
__syncthreads();
// thread 0 of the block merges the shared histogram into the global one
if(hist_index == 0)
{
for(int i = 0; i < hist_len; i++)
atomicAdd(&(hist[i]), hist_private[i]); //merge shared histogram
}
}
} |
9,626 | #include "includes.h"
// Per-block reduction over all d_n_inputs_cols * d_nhid weight gradients:
//   d_len_out[block] = sum of d_w_grad[i]^2
//   d_dot_out[block] = sum of d_w_grad[i] * d_prev_grad[i]
// Also snapshots the current gradient into d_prev_grad for the next call.
// One partial pair per block is emitted; the caller must finish the
// reduction across blocks.
// NOTE(review): REDUC_THREADS must equal blockDim.x, and blockDim.x must be
// a power of two for the halving loop to cover every element - confirm at
// the launch site.
__global__ void device_len_dot ()
{
__shared__ float partial_len[REDUC_THREADS], partial_dot[REDUC_THREADS] ;
int i, n, index ;
float sum_len, sum_dot ;
index = threadIdx.x ;
n = d_n_inputs_cols * d_nhid ;
sum_len = sum_dot = 0.0f ;
// grid-stride accumulation into per-thread partial sums
for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) {
sum_len += d_w_grad[i] * d_w_grad[i] ;
sum_dot += d_w_grad[i] * d_prev_grad[i] ;
d_prev_grad[i] = d_w_grad[i] ;
}
partial_len[index] = sum_len ;
partial_dot[index] = sum_dot ;
__syncthreads() ;
// tree reduction in shared memory, halving the active range each step
for (i=blockDim.x>>1 ; i ; i>>=1) {
if (index < i) {
partial_len[index] += partial_len[index+i] ;
partial_dot[index] += partial_dot[index+i] ;
}
__syncthreads() ;
}
// thread 0 publishes this block's partial results
if (index == 0) {
d_len_out[blockIdx.x] = partial_len[0] ;
d_dot_out[blockIdx.x] = partial_dot[0] ;
}
} |
9,627 | #include "includes.h"
// Copy one row of B into A and increment each cell's visit counter.
// Each thread handles row threadIdx.x of the n x n matrices; only
// threadIdx.x is consulted, so presumably this is launched with a single
// block of at least n threads.
__global__ void kernel(int *A, int *B, int *counter, int n) {
    const int row = threadIdx.x;
    if (row >= n) {
        return;
    }
    const int base = row * n;
    for (int col = 0; col < n; ++col) {
        ++counter[base + col];
        A[base + col] = B[base + col];
    }
} |
9,628 | #include "node.hh"
#include <iostream>
namespace rt
{
// Human-readable names for each op code, indexed by the OP_* enum value.
// FIX: the original listed "sigmoid_cross_entropy_grad" twice with a missing
// comma between the copies; C++ string-literal concatenation silently fused
// them into one garbled entry ("sigmoid_cross_entropy_gradsigmoid_cross_
// entropy_grad"). Dropping the stray duplicate restores a clean name at that
// index while keeping exactly 37 initializers, so no later entry shifts.
const char* Node::OP_NAMES[37] =
{
"mat_mat_mul",
"mat_rvect_add",
"sigmoid",
"mse",
"softmax",
"log_softmax",
"softmax_cross_entropy",
"conv2d",
"relu",
"relu_leaky",
"tanh",
"mse_grad",
"sigmoid_grad",
"mat_mul_add",
"tmat_mat_mul",
"mat_tmat_mul",
"mat_sum_rows",
"mat_sum_cols",
"softmax_cross_entropy_grad",
"relu_grad",
"conv2d_bias_add",
"update",
"sigmoid_cross_entropy",
"sigmoid_cross_entropy_grad",
"conv2d_input_grad",
"conv2d_kernel_grad",
"argmax_acc",
"moment_update",
"moment_update2",
"adam_update",
"leaky_relu_grad",
"conv2d_add_bias_grad",
"tanh_grad",
"conv2d_transpose",
"conv2d_transpose_input_grad",
"conv2d_transpose_kernel_grad",
"add"
};
// Build a no-op node whose only role is to depend on `preds` (ordering).
Node* Node::nop(const std::vector<Node*>& preds)
{
return new Node(OP_NOP, preds);
}
// ---- Factory helpers ----
// Each op_* helper builds a Node of the given op type and records operand
// pointers and dimensions; the actual computation is executed elsewhere
// from this graph description.

// Matrix product node: len1/len2/len3 record rowsl/colsl/colsr.
Node* Node::op_mat_mat_mul(const dbl_t* left, const dbl_t* right, dbl_t* output,
std::size_t rowsl, std::size_t colsl, std::size_t colsr,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_MAT_MUL, preds);
res->in1 = left;
res->in2 = right;
res->out1 = output;
res->len1 = rowsl;
res->len2 = colsl;
res->len3 = colsr;
return res;
}
// Row-vector addition node over a rows x cols matrix.
Node* Node::op_mat_rvect_add(const dbl_t* left, const dbl_t* right, dbl_t* output,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_RVECT_ADD, preds);
res->in1 = left;
res->in2 = right;
res->out1 = output;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Element-wise sigmoid node over len values.
Node* Node::op_sigmoid(const dbl_t* args, dbl_t* output, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SIGMOID, preds);
res->in1 = args;
res->out1 = output;
res->len1 = len;
return res;
}
// Mean-squared-error node between y and y_hat (rows x cols).
Node* Node::op_mse(const dbl_t* y, const dbl_t* y_hat, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MSE, preds);
res->in1 = y;
res->in2 = y_hat;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// MSE gradient node over len values.
Node* Node::op_mse_grad(const dbl_t* y, const dbl_t* y_hat, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MSE_GRAD, preds);
res->in1 = y;
res->in2 = y_hat;
res->out1 = out;
res->len1 = len;
return res;
}
// 2-D convolution node: records strides, top/left padding, and the
// 4-element input/kernel size arrays.
Node* Node::op_conv2d(const dbl_t* input, const dbl_t* kernel,
const int strides[], int pad_top, int pad_left,
dbl_t* output,
const int input_size[], const int kernel_size[],
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D, preds);
res->in1 = input;
res->in2 = kernel;
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->int_cons1 = pad_top;
res->int_cons2 = pad_left;
res->out1 = output;
res->sizes1[0] = input_size[0];
res->sizes1[1] = input_size[1];
res->sizes1[2] = input_size[2];
res->sizes1[3] = input_size[3];
res->sizes2[0] = kernel_size[0];
res->sizes2[1] = kernel_size[1];
res->sizes2[2] = kernel_size[2];
res->sizes2[3] = kernel_size[3];
return res;
}
// Bias-addition node for a conv output of the given 4-D size.
Node* Node::op_conv2d_bias_add(const dbl_t* z, const dbl_t* bias, dbl_t* output,
const int input_size[], const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_BIAS_ADD, preds);
res->in1 = z;
res->in2 = bias;
res->out1 = output;
res->sizes1[0] = input_size[0];
res->sizes1[1] = input_size[1];
res->sizes1[2] = input_size[2];
res->sizes1[3] = input_size[3];
return res;
}
// Gradient node for the conv bias-add, over the given 4-D size.
Node* Node::op_conv2d_bias_add_grad(const dbl_t* z, const int size[],
dbl_t* output,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_BIAS_ADD_GRAD, preds);
res->in1 = z;
res->sizes1[0] = size[0];
res->sizes1[1] = size[1];
res->sizes1[2] = size[2];
res->sizes1[3] = size[3];
res->out1 = output;
return res;
}
// Conv2d gradient w.r.t. the input; intconst2 caches input_size[1..2].
Node* Node::op_conv2d_input_grad(const dbl_t* y, const dbl_t* kernel, const int strides[],
dbl_t* output, const int y_size[], const int kernel_size[],
const int input_size[],
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_INPUT_GRAD, preds);
res->in1 = y;
res->in2 = kernel;
res->out1 = output;
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->intconst2[0] = input_size[1];
res->intconst2[1] = input_size[2];
res->sizes1[0] = y_size[0];
res->sizes1[1] = y_size[1];
res->sizes1[2] = y_size[2];
res->sizes1[3] = y_size[3];
res->sizes2[0] = kernel_size[0];
res->sizes2[1] = kernel_size[1];
res->sizes2[2] = kernel_size[2];
res->sizes2[3] = kernel_size[3];
return res;
}
// Conv2d gradient w.r.t. the kernel; intconst2 caches the padded size.
Node* Node::op_conv2d_kernel_grad(const dbl_t* y, const dbl_t* input, const int strides[],
dbl_t* output, const int y_size[], const int input_size[],
const int padded_size[],
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_KERNEL_GRAD, preds);
res->in1 = y;
res->in2 = input;
res->out1 = output;
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->intconst2[0] = padded_size[0];
res->intconst2[1] = padded_size[1];
res->sizes1[0] = y_size[0];
res->sizes1[1] = y_size[1];
res->sizes1[2] = y_size[2];
res->sizes1[3] = y_size[3];
res->sizes2[0] = input_size[0];
res->sizes2[1] = input_size[1];
res->sizes2[2] = input_size[2];
res->sizes2[3] = input_size[3];
return res;
}
// Transposed-convolution node: sizes1 is the output size, sizes2/sizes3 the
// input/kernel sizes.
Node* Node::op_conv2d_transpose(const dbl_t* input, const dbl_t* kernel, const int out_size[],
const int strides[], dbl_t* output, const int input_size[],
const int kernel_size[], const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_TRANSPOSE, preds);
res->in1 = input;
res->in2 = kernel;
res->out1 = output;
res->sizes1[0] = out_size[0];
res->sizes1[1] = out_size[1];
res->sizes1[2] = out_size[2];
res->sizes1[3] = out_size[3];
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->sizes2[0] = input_size[0];
res->sizes2[1] = input_size[1];
res->sizes2[2] = input_size[2];
res->sizes2[3] = input_size[3];
res->sizes3[0] = kernel_size[0];
res->sizes3[1] = kernel_size[1];
res->sizes3[2] = kernel_size[2];
res->sizes3[3] = kernel_size[3];
return res;
}
// Transposed-conv gradient w.r.t. the input; intconst2 caches input_size[1..2].
Node* Node::op_conv2d_transpose_input_grad(const dbl_t* y, const dbl_t* kernel, const int strides[],
dbl_t* output, const int y_size[], const int kernel_size[],
const int input_size[],
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_TRANSPOSE_INPUT_GRAD, preds);
res->in1 = y;
res->in2 = kernel;
res->out1 = output;
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->intconst2[0] = input_size[1];
res->intconst2[1] = input_size[2];
res->sizes1[0] = y_size[0];
res->sizes1[1] = y_size[1];
res->sizes1[2] = y_size[2];
res->sizes1[3] = y_size[3];
res->sizes2[0] = kernel_size[0];
res->sizes2[1] = kernel_size[1];
res->sizes2[2] = kernel_size[2];
res->sizes2[3] = kernel_size[3];
return res;
}
// Transposed-conv gradient w.r.t. the kernel.
Node* Node::op_conv2d_transpose_kernel_grad(const dbl_t* y, const dbl_t* input,
const int strides[],
dbl_t* output, const int y_size[],
const int input_size[], const int kernel_size[],
const std::vector<Node*>& preds)
{
auto res = new Node(OP_CONV2D_TRANSPOSE_KERNEL_GRAD, preds);
res->in1 = y;
res->in2 = input;
res->out1 = output;
res->intconst[0] = strides[0];
res->intconst[1] = strides[1];
res->sizes1[0] = y_size[0];
res->sizes1[1] = y_size[1];
res->sizes1[2] = y_size[2];
res->sizes1[3] = y_size[3];
res->sizes2[0] = input_size[0];
res->sizes2[1] = input_size[1];
res->sizes2[2] = input_size[2];
res->sizes2[3] = input_size[3];
res->sizes3[0] = kernel_size[0];
res->sizes3[1] = kernel_size[1];
res->sizes3[2] = kernel_size[2];
res->sizes3[3] = kernel_size[3];
return res;
}
// Softmax node over a rows x cols matrix.
Node* Node::op_softmax(const dbl_t* args, dbl_t* output,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SOFTMAX, preds);
res->in1 = args;
res->out1 = output;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Log-softmax node over a rows x cols matrix.
Node* Node::op_log_softmax(const dbl_t* args, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_LOG_SOFTMAX, preds);
res->in1 = args;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Softmax cross-entropy node between labels y and logits.
Node* Node::op_softmax_cross_entropy(const dbl_t* y, const dbl_t* logits, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SOFTMAX_CROSS_ENTROPY, preds);
res->in1 = y;
res->in2 = logits;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Element-wise ReLU node over len values.
Node* Node::op_relu(const dbl_t* args, dbl_t* output, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_RELU, preds);
res->in1 = args;
res->out1 = output;
res->len1 = len;
return res;
}
// Leaky-ReLU node; the negative-side slope is kept in alpha_leaky.
Node* Node::op_relu_leaky(const dbl_t* args, dbl_t* output, std::size_t len,
const dbl_t alpha,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_RELU_LEAKY, preds);
res->in1 = args;
res->out1 = output;
res->len1 = len;
res->alpha_leaky = alpha;
return res;
}
// Element-wise tanh node over len values.
Node* Node::op_tanh(const dbl_t* args, dbl_t* output, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_TANH, preds);
res->in1 = args;
res->out1 = output;
res->len1 = len;
return res;
}
// Sigmoid gradient node: combines the sigmoid output with upstream dout.
Node* Node::op_sigmoid_grad(const dbl_t* sig_out, const dbl_t* dout, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SIGMOID_GRAD, preds);
res->in1 = sig_out;
res->in2 = dout;
res->out1 = out;
res->len1 = len;
return res;
}
// Fused matrix-multiply-plus-bias node (x * w + b).
Node* Node::op_mat_mul_add(const dbl_t* x, const dbl_t* w, const dbl_t* b,
dbl_t* output,
std::size_t rowsx, std::size_t colsx, std::size_t colsw,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_MUL_ADD, preds);
res->in1 = x;
res->in2 = w;
res->in3 = b;
res->out1 = output;
res->len1 = rowsx;
res->len2 = colsx;
res->len3 = colsw;
return res;
}
// Product node with the left operand transposed.
Node* Node::op_tmat_mat_mul(const dbl_t* left, const dbl_t* right, dbl_t* output,
std::size_t rowsl, std::size_t colsl, std::size_t colsr,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_TMAT_MAT_MUL, preds);
res->in1 = left;
res->in2 = right;
res->out1 = output;
res->len1 = rowsl;
res->len2 = colsl;
res->len3 = colsr;
return res;
}
// Product node with the right operand transposed.
Node* Node::op_mat_tmat_mul(const dbl_t* left, const dbl_t* right, dbl_t* output,
std::size_t rowsl, std::size_t colsl, std::size_t colsr,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_TMAT_MUL, preds);
res->in1 = left;
res->in2 = right;
res->out1 = output;
res->len1 = rowsl;
res->len2 = colsl;
res->len3 = colsr;
return res;
}
// Row-sum node over a rows x cols matrix.
Node* Node::op_mat_sum_rows(const dbl_t* arg, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_SUM_ROWS, preds);
res->in1 = arg;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Column-sum node over a rows x cols matrix.
Node* Node::op_mat_sum_cols(const dbl_t* arg, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MAT_SUM_COLS, preds);
res->in1 = arg;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Softmax cross-entropy gradient node.
Node* Node::op_softmax_cross_entropy_grad(const dbl_t* y, const dbl_t* logits, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SOFTMAX_CROSS_ENTROPY_GRAD, preds);
res->in1 = y;
res->in2 = logits;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// ReLU gradient node: combines pre-activation z with upstream dout.
Node* Node::op_relu_grad(const dbl_t* z, const dbl_t* dout, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_RELU_GRAD, preds);
res->in1 = z;
res->in2 = dout;
res->out1 = out;
res->len1 = len;
return res;
}
// In-place variable update node; `var` is also the output buffer (out1).
Node* Node::op_update(dbl_t* var, const dbl_t* dt, const dbl_t* coeff,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_UPDATE, preds);
res->in1 = dt;
res->in2 = coeff;
res->out1 = var;
res->len1 = len;
return res;
}
// Sigmoid cross-entropy node between labels y and logits.
Node* Node::op_sigmoid_cross_entropy(const dbl_t* y, const dbl_t* logits, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SIGMOID_CROSS_ENTROPY, preds);
res->in1 = y;
res->in2 = logits;
res->out1 = out;
res->len1 = len;
return res;
}
// Sigmoid cross-entropy gradient node.
Node* Node::op_sigmoid_cross_entropy_grad(const dbl_t* y, const dbl_t* logits, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_SIGMOID_CROSS_ENTROPY_GRAD, preds);
res->in1 = y;
res->in2 = logits;
res->out1 = out;
res->len1 = len;
return res;
}
// Tanh gradient node: combines the tanh output with upstream dout.
Node* Node::op_tanh_grad(const dbl_t* tanh_out, const dbl_t* dout, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_TANH_GRAD, preds);
res->in1 = tanh_out;
res->in2 = dout;
res->out1 = out;
res->len1 = len;
return res;
}
// Argmax-match accuracy node between y and y_hat (rows x cols).
Node* Node::op_argmax_acc(const dbl_t* y, const dbl_t* y_hat, dbl_t* out,
std::size_t rows, std::size_t cols,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_ARGMAX_ACC, preds);
res->in1 = y;
res->in2 = y_hat;
res->out1 = out;
res->len1 = rows;
res->len2 = cols;
return res;
}
// Momentum update node; cons1/cons2 hold the two coefficients, var is
// updated in place (out1).
Node* Node::op_moment_update(dbl_t* var, const dbl_t* dt,
dbl_t coeff1, dbl_t coeff2, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MOMENT_UPDATE, preds);
res->in1 = dt;
res->out1 = var;
res->len1 = len;
res->cons1 = coeff1;
res->cons2 = coeff2;
return res;
}
// Second-moment update node; same layout as op_moment_update.
Node* Node::op_moment_update2(dbl_t* var, const dbl_t* dt,
dbl_t coeff1, dbl_t coeff2, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_MOMENT_UPDATE2, preds);
res->in1 = dt;
res->out1 = var;
res->len1 = len;
res->cons1 = coeff1;
res->cons2 = coeff2;
return res;
}
// Adam update node: moments m/v as inputs, var and t both outputs;
// lr/beta1/beta2/eps are recorded in cons1..cons4.
Node* Node::op_adam_update(dbl_t* var, dbl_t* t, const dbl_t* m, const dbl_t* v,
dbl_t lr, dbl_t beta1, dbl_t beta2, dbl_t eps,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_ADAM_UPDATE, preds);
res->in1 = m;
res->in2 = v;
res->out1 = var;
res->out2 = t;
res->len1 = len;
res->cons1 = lr;
res->cons2 = beta1;
res->cons3 = beta2;
res->cons4 = eps;
return res;
}
// Leaky-ReLU gradient node; the slope alpha is recorded in cons1.
Node* Node::op_leaky_relu_grad(const dbl_t* z, const dbl_t* dout, dbl_t* out,
dbl_t alpha, std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_LEAKY_RELU_GRAD, preds);
res->in1 = z;
res->in2 = dout;
res->out1 = out;
res->len1 = len;
res->cons1 = alpha;
return res;
}
// Element-wise addition node over len values.
Node* Node::op_add(const dbl_t* a, const dbl_t* b, dbl_t* out,
std::size_t len,
const std::vector<Node*>& preds)
{
auto res = new Node(OP_ADD, preds);
res->in1 = a;
res->in2 = b;
res->out1 = out;
res->len1 = len;
return res;
}
// Construct a graph node of op `type` and wire predecessor/successor edges
// in both directions; null predecessors are skipped. Operand pointers and
// lengths start zeroed - the op_* factory helpers fill them in afterwards.
Node::Node(int type, std::vector<Node*> preds)
: type(type)
, use_simd(false)
, in1(nullptr)
, in2(nullptr)
, in3(nullptr)
, out1(nullptr)
, out2(nullptr)
, len1(0)
, len2(0)
, len3(0)
{
for (auto n : preds)
{
if (n)
{
n->succs.push_back(this);
this->preds.push_back(n);
}
}
}
}
|
9,629 | #include "includes.h"
// Element-wise square root: output[i] = sqrt(input[i]) for i < numElements.
// Supports fully 3-D grids and blocks by flattening both into one global id.
__global__ void process_kernel3(const float* input, float* output, int numElements){
    // Flatten the 3-D block coordinate, then the 3-D thread coordinate.
    int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
    // Bounds guard: the grid may cover more threads than elements.
    if(globalThreadId < numElements)
        // sqrtf keeps the computation in single precision instead of the
        // original (float)sqrt(...) double round-trip; for sqrt the result
        // is identical, without the double-precision instruction cost.
        output[globalThreadId] = sqrtf(input[globalThreadId]);
}
9,630 | #include "includes.h"
// Spike detection: for every neuron (grid-stride loop), if the membrane
// potential reached threshold, record the spike time and reset the
// potential. When high-fidelity storage is on, the per-neuron bit array is
// also updated - the bit for the current timestep is set on a spike and
// cleared otherwise (the array is a circular buffer over
// bitarray_maximum_axonal_delay_in_timesteps timesteps).
// NOTE(review): `idx` is an int compared against a size_t neuron count -
// neuron counts beyond INT_MAX would overflow; confirm counts stay small.
__global__ void check_for_neuron_spikes_kernel(float *d_membrane_potentials_v, float *d_thresholds_for_action_potential_spikes, float *d_resting_potentials, float* d_last_spike_time_of_each_neuron, unsigned char* d_bitarray_of_neuron_spikes, int bitarray_length, int bitarray_maximum_axonal_delay_in_timesteps, float current_time_in_seconds, float timestep, size_t total_number_of_neurons, bool high_fidelity_spike_flag) {
// Get thread IDs
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < total_number_of_neurons) {
if (d_membrane_potentials_v[idx] >= d_thresholds_for_action_potential_spikes[idx]) {
// Set current time as last spike time of neuron
d_last_spike_time_of_each_neuron[idx] = current_time_in_seconds;
// Reset membrane potential
d_membrane_potentials_v[idx] = d_resting_potentials[idx];
// High fidelity spike storage
if (high_fidelity_spike_flag){
// Get start of the given neuron's bits
int neuron_id_spike_store_start = idx * bitarray_length;
// Get offset depending upon the current timestep
int offset_index = (int)(round((float)(current_time_in_seconds / timestep))) % bitarray_maximum_axonal_delay_in_timesteps;
int offset_byte = offset_index / 8;
int offset_bit_pos = offset_index - (8 * offset_byte);
// Get the specific position at which we should be putting the current value
unsigned char byte = d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte];
// Set the specific bit in the byte to on
byte |= (1 << offset_bit_pos);
// Assign the byte
d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte] = byte;
}
} else {
// High fidelity spike storage
if (high_fidelity_spike_flag){
// Get start of the given neuron's bits
int neuron_id_spike_store_start = idx * bitarray_length;
// Get offset depending upon the current timestep
int offset_index = (int)(round((float)(current_time_in_seconds / timestep))) % bitarray_maximum_axonal_delay_in_timesteps;
int offset_byte = offset_index / 8;
int offset_bit_pos = offset_index - (8 * offset_byte);
// Get the specific position at which we should be putting the current value
unsigned char byte = d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte];
// Set the specific bit in the byte to off (no spike this timestep)
byte &= ~(1 << offset_bit_pos);
// Assign the byte
d_bitarray_of_neuron_spikes[neuron_id_spike_store_start + offset_byte] = byte;
}
}
idx += blockDim.x * gridDim.x;
}
// NOTE(review): this trailing barrier is redundant - no shared memory is
// used and nothing follows it; it appears safe to remove.
__syncthreads();
} |
// Empty kernel stub with C linkage: accepts a matrix size n but currently
// performs no work (body presumably not yet implemented).
extern "C"
__global__ void random_matrix(int n)
{
} |
9,632 | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <getopt.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <limits.h>
#include <sys/sysinfo.h>
#include <sys/types.h>
#include <signal.h>
#include <ctype.h>
#include <locale.h>
// Key schedule: KEYLEN 32-bit words (only the low 16 bits are ever filled).
typedef struct {
#define KEYLEN 256
uint32_t state[KEYLEN];
} KEY;
// Database header: format version plus the encrypted master password.
typedef struct {
#define BUFLEN 16
uint32_t version;
unsigned char master_pass[BUFLEN];
} HEADER;
// One encrypted credential record.
typedef struct {
unsigned char site[BUFLEN * 2];
unsigned char user[BUFLEN];
unsigned char pass[BUFLEN];
} ENTRY;
// trialSeed receives the recovered seed; startSeed is the --sfrom offset.
uint32_t trialSeed = 0;
uint32_t startSeed = 0;
uint32_t *d_trialSeed;
// Host/device arrays of per-block starting seeds.
uint32_t *cudaBlockSeed;
uint32_t *d_cudaBlockSeed;
__device__ HEADER d_hdr;
// Host/device copies of the encrypted master password.
unsigned char master_pass[BUFLEN];
unsigned char *d_master_pass;
/*Not sure what a good default for this is*/
uint32_t cudaBlocks = 256;
// Device-global flag set once any block recovers the password.
__device__ int passFound = 0;
// Long-only command-line options (see help()).
static struct option longopts[] =
{
{ "help", no_argument, 0, 'h' },
{ "crack", no_argument, 0, 'c' },
{ "blocks", required_argument, 0, 'b' },
{ "sfrom", required_argument, 0, 'f' },
{ 0, 0, 0, 0 }
};
// Print the usage/option summary to stderr.
void help()
{
fprintf(stderr,
"\nMilitary-Grade Password Cracker\n"
"Usage: ./passwd_cracker [options] [suboptions] <database>\n\n"
"--crack\n"
"\tBrute force the database\n\n"
"--blocks n\n"
"\tDistribute attack across n number of CUDA blocks.\n\n"
"--sfrom 0-4294967295 (default 0)\n"
"\tSpecify where to begin in range of seed values.\n\n"
);
}
// Derive the key schedule on the host: seed glibc's PRNG with trialSeed and
// fill every key word with the low 16 bits of successive rand() values.
// Must stay in lock-step with the device-side randInt() reimplementation.
void derive_key(KEY *key, uint32_t trialSeed)
{
    srand(trialSeed);
    for (int i = 0; i < KEYLEN; i++)
        key->state[i] = rand() & 0xffff;
}
// Symmetric stream cipher (RC4-style PRGA) over `data` in place. Works on a
// private copy of the key state, so the caller's KEY is left untouched and
// repeated calls with the same key continue the same keystream layout.
void encrypt(KEY *key, unsigned char *data, const size_t len)
{
    uint32_t state[KEYLEN];
    memcpy(&state, key->state, sizeof(state));
    uint32_t x = 0, y = 0;
    for (uint32_t i = 0; i < len; i++)
    {
        x = (x + 1) % KEYLEN;
        y = (y + state[x]) % KEYLEN;
        // swap the two state words, then XOR with the derived keystream byte
        uint32_t tmp = state[x];
        state[x] = state[y];
        state[y] = tmp;
        data[i] ^= state[(state[x] + state[y]) % KEYLEN];
    }
}
/*An implementation of glibc's rand() that will produce the same values for respective seed value*/
/*This is needed since I can't call rand() from a kernel and it MUST produce the same values that glibc's would*/
// Returns the nSeedUse-th pseudo-random value for `seed`, rebuilding the
// full 344-entry glibc state table on every call (O(344 + nSeedUse)).
// NOTE(review): both the __shared__ table `r` and the function-scope
// `static int i` are shared between threads; this is only race-free because
// the kernel is launched with one thread per block - confirm before reusing
// with wider blocks.
__device__ uint32_t randInt(uint32_t seed, uint32_t nSeedUse) {
/*If this is not shared memory then is slows down the performance dramatically*/
/*Set to 600 because in addition to the first 344 values it must hold 256 values for the size of the key */
__shared__ int r[600];
/*This must be kept static so that successive calls to the function produce the correct next random value per seed*/
static int i;
// glibc's rand() treats seed 0 as 1
if(seed == 0)
seed++;
r[0] = seed;
for (i=1; i<31; i++) {
r[i] = (16807LL * r[i-1]) % 2147483647;
if (r[i] < 0) {
r[i] += 2147483647;
}
}
for (i=31; i<34; i++) {
r[i] = r[i-31];
}
for (i=34; i<344; i++) {
r[i] = r[i-31] + r[i-3];
}
/*nSeedUse represents the number of times this seed has been used*/
r[344 + nSeedUse] = r[(344 + nSeedUse)-31] + r[(344 + nSeedUse)-3];
return (unsigned int)r[344 + nSeedUse] >> 1;
}
// Device-side twin of derive_key(): fills the key schedule with the low 16
// bits of successive emulated-glibc rand() values for trialSeed.
// FIX: the loop counter was declared __shared__, which races if a block
// ever runs more than one thread; a per-thread local is behavior-identical
// under the existing one-thread-per-block launch and safe in general.
__device__ void d_derive_key(KEY *d_key, uint32_t trialSeed)
{
    for (int i = 0; i < KEYLEN; i++) {
        d_key->state[i] = randInt(trialSeed, i) & 0xffff;
    }
}
// Device-side twin of encrypt(): RC4-style PRGA XORing the keystream over
// `data` in place. Unlike the host version this mutates d_key->state, so
// callers must pass a freshly derived key.
// FIX: the counters i/t/x/y were declared __shared__, which races if a
// block ever runs more than one thread; per-thread locals are
// behavior-identical under the existing one-thread-per-block launch.
__device__ void d_encrypt(KEY *d_key, unsigned char *data, const size_t len)
{
    uint32_t t, x = 0, y = 0;
    for (uint32_t i = 0; i < len; i++)
    {
        x = (x + 1) % KEYLEN;
        y = (y + d_key->state[x]) % KEYLEN;
        t = d_key->state[x];
        d_key->state[x] = d_key->state[y];
        d_key->state[y] = t;
        t = (d_key->state[x] + d_key->state[y]) % KEYLEN;
        data[i] = d_key->state[t] ^ data[i];
    }
}
/*A replacement of strlen() to be called in a kernel*/
// Returns the number of bytes before the terminating NUL.
// FIX: the counter was declared __shared__, which races if a block ever
// runs more than one thread; a per-thread local is behavior-identical under
// the existing one-thread-per-block launch and safe in general.
__device__ int stringLength(unsigned char *string)
{
    int i = 0;
    while(string[i] != '\0') {
        i++;
    }
    return i;
}
/*A replacement for isprint() to be called in a kernel*/
// Returns 1 when c is a printable ASCII character (space 0x20 through
// tilde 0x7E), 0 otherwise.
// FIX: the original tested `c >= 32 || c <= 126`, which is true for every
// possible byte, so non-printable decryptions were never rejected. The
// printable range requires both bounds to hold (&&).
__device__ int isPrintable(unsigned char c)
{
    if(c >= 32 && c <= 126)
        return 1;
    else
        return 0;
}
// Trial-decrypt the encrypted master password with trialSeed. Returns 0
// when the password is found (or already found by another block), 1 to
// keep searching. On success stores the seed in *foundSeed and raises the
// device-global passFound flag.
// A candidate is accepted when (a) the decryption is NUL-terminated,
// (b) XOR-folding its 4-byte words reproduces trialSeed, and (c) every
// byte is printable ASCII.
// NOTE(review): the __shared__ locals (buf, mstrPassSeed, d_key, ...) are
// only race-free because the kernel runs one thread per block - confirm
// before widening the launch.
__device__ int crack(unsigned char *master_pass, uint32_t trialSeed, uint32_t *foundSeed)
{
__shared__ unsigned char buf[BUFLEN];
memset(buf,0,BUFLEN);
__shared__ uint32_t mstrPassSeed;
mstrPassSeed = 0;
int i = 0, nonprintable = 0;
__shared__ KEY d_key;
/*This MUST be local so that each block has its own copy of the master_pass. Otherwise one block may change it before the other has confirmed it decrypted correctly*/
__shared__ unsigned char masterPassLocal[BUFLEN];
memcpy(masterPassLocal,master_pass,sizeof(unsigned char) * BUFLEN);
d_derive_key(&d_key, trialSeed);
d_encrypt(&d_key, masterPassLocal, BUFLEN);
/*This will stop other blocks from executing further if the password has already been found*/
if(passFound == 1)
return 0;
/*If the last byte of the resulting decryption is a null byte it MIGHT be the null-terminated password*/
if (masterPassLocal[BUFLEN-1] == '\0')
{
/*FIXME: Maybe this would be faster as a memcpy call instead*/
for(i = 0; i < stringLength(masterPassLocal) ; i++) {
buf[i] = masterPassLocal[i];
}
/*Serialize the resulting decryption and see if the integer it produces matches the trialSeed*/
for (i = 0; i < BUFLEN - 4; i+=4)
mstrPassSeed ^= (uint32_t) buf[i+0] << 0
| (uint32_t) buf[i+1] << 8
| (uint32_t) buf[i+2] << 16
| (uint32_t) buf[i+3] << 24;
/*If the resulting decryption produces the same integer as the trialSeed and it's a null-terminated string it's probably the password*/
if(trialSeed == mstrPassSeed)
{
/*Test the resulting decryption for any non-printable characters*/
for(i = 0; i < BUFLEN; i++)
{
if(!isPrintable(masterPassLocal[i]))
nonprintable++;
}
/*If it contains no non-printable characters then it is surely the password*/
if(!nonprintable) {
*foundSeed = trialSeed;
passFound = 1;
return 0;
}
}
}
return 1;
}
// Entry kernel: each block sweeps its own slice of the 32-bit seed space,
// calling crack() on every candidate until a hit (then the block exits via
// inline PTX) or its slice is exhausted.
// NOTE(review): the last block reads seedArray[blockIdx.x + 1] ==
// seedArray[gridDim.x], one element past the end of the array allocated in
// main() (cudaBlocks entries) - the upper bound for the final slice is
// out-of-bounds. An extra sentinel element (e.g. UINT_MAX) should be
// appended on the host.
__global__ void distributeAndCrack(uint32_t *seedArray, unsigned char *master_pass, uint32_t *foundSeed)
{
/*seedArray is an array of integers with each being the starting seed value each block should start at*/
__shared__ uint32_t trialSeed;
trialSeed = seedArray[blockIdx.x];
/*Was hoping that making the copy of the encrypted master pass that gets sent to crack() will be faster if it's in shared memory*/
__shared__ unsigned char sharedMasterPass[BUFLEN];
memcpy(sharedMasterPass,master_pass,sizeof(unsigned char) * BUFLEN);
/*Iterate each block from its start seed up until the next block's start seed*/
while( trialSeed < seedArray[blockIdx.x + 1])
{
if (crack(sharedMasterPass, trialSeed, foundSeed) == 0)
{
asm("exit;");
}
trialSeed++;
}
}
// Program entry: parse options, then (for --crack) read the database
// header, partition the 32-bit seed space across CUDA blocks, launch the
// brute-force kernel, and finally decrypt and print the database with the
// recovered seed.
// NOTE(review): fread/cudaMalloc/cudaMemcpy return values are unchecked,
// cudaBlockSeed and the device buffers are never freed, and the seed array
// lacks the sentinel element the kernel's last block reads (see
// distributeAndCrack).
int main(int argc, char **argv)
{
char *db = NULL;
int opts = 0, idx = 0, ret = 0;
int _crack = 0, _setbocks = 0, _from = 0;
startSeed = 0;
KEY key;
HEADER hdr;
ENTRY entry;
FILE *dbh;
// option loop: --help/--crack/--blocks/--sfrom (long options only)
while (1)
{
if ((opts = getopt_long_only(argc, argv, "", longopts, &idx)) == -1)
break;
switch (opts)
{
case 0:
// flag-setting long option; otherwise fall through to help
if (longopts[idx].flag)
break;
case 'h':
help();
return 0;
case 'c':
_crack++;
break;
case 'b':
_setbocks++;
cudaBlocks = atoi(optarg);
break;
case 'f':
_from++;
sscanf(optarg,"%u",&startSeed);
break;
default:
abort();
}
}
if (optind == argc)
{
fprintf(stderr, "Error: database required\n");
return -1;
}
assert(db = strdup(argv[optind]));
if (_crack)
{
if ((dbh = fopen(db, "r")) == NULL)
return errno;
// header holds the encrypted master password; first entry follows
fread(&hdr, sizeof(hdr), 1, dbh);
fread(&entry, sizeof(entry), 1, dbh);
cudaBlockSeed = (uint32_t *)malloc(sizeof(uint32_t) * cudaBlocks);
/*This loop will divvy up the range of seeds to try equally per the amount of blocks and store the start of each block's range into an integer in the array*/
for(int i=1; i < cudaBlocks; i++)
{
cudaBlockSeed[i] = (((UINT_MAX-startSeed)/cudaBlocks) * i + startSeed);
}
/*This will allow the first block to be started at a different seed value if specified*/
cudaBlockSeed[0] = startSeed;
cudaMalloc((void **)&d_cudaBlockSeed, sizeof(uint32_t) * cudaBlocks);
cudaMemcpy(d_cudaBlockSeed, cudaBlockSeed, sizeof(uint32_t) * cudaBlocks, cudaMemcpyHostToDevice);
/*The master password is stored encrypted in the header, so load it up to attempt trial decryptions on*/
memcpy(&master_pass,&hdr.master_pass,sizeof(hdr.master_pass));
cudaMalloc((void **)&d_master_pass, sizeof(master_pass));
cudaMemcpy(d_master_pass, master_pass, sizeof(master_pass), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_trialSeed, sizeof(uint32_t));
cudaMemcpy(d_trialSeed,&trialSeed,sizeof(uint32_t), cudaMemcpyHostToDevice);
/*This kernel will start each block off at the proper seed value and launch the brute-force attack*/
/*When the proper seed value is found it will be stored into d_trialSeed*/
distributeAndCrack<<<cudaBlocks,1>>>(d_cudaBlockSeed, d_master_pass, d_trialSeed);
cudaDeviceSynchronize();
cudaMemcpy(&trialSeed,d_trialSeed,sizeof(uint32_t), cudaMemcpyDeviceToHost);
/*Now that the proper seed has been found, it can be used to decrypt and print the database*/
derive_key(&key, trialSeed);
encrypt(&key, hdr.master_pass, BUFLEN);
encrypt(&key, entry.site, sizeof(entry.site));
encrypt(&key, entry.user, sizeof(entry.user));
encrypt(&key, entry.pass, sizeof(entry.pass));
fprintf(stdout,"\n%-32s\t%-16s\t%-16s\n", "SITE", "USERNAME", "PASSWORD");
fprintf(stdout,"--------------------------------");
fprintf(stdout,"--------------------------------");
fprintf(stdout,"----------------\n");
fprintf(stdout,"%-32s\t%-16s\t%-16s\n", entry.site, entry.user, entry.pass);
// decrypt and print the remaining entries with the same keystream
while (!feof(dbh) && fread(&entry, sizeof(entry), 1, dbh) == 1)
{
encrypt(&key, entry.site, sizeof(entry.site));
encrypt(&key, entry.user, sizeof(entry.user));
encrypt(&key, entry.pass, sizeof(entry.pass));
printf("%-32s\t%-16s\t%-16s\n", entry.site, entry.user, entry.pass);
}
fprintf(stdout,"Master Pass: %s\n", hdr.master_pass);
fprintf(stdout,"Seed value: %u\n", trialSeed);
printf ("\n");
fflush(stdout);
fclose(dbh);
return ret;
}
return -1;
}
|
9,633 | #include "includes.h"
// Backward pass of temporal max pooling: scatter each output-frame gradient
// back to the input frame recorded in `indices`, using atomicAdd because
// overlapping pooling windows (dW < kW) can target the same input cell.
// Grid layout (per the comment below): blockIdx.x = batch index, time index
// = threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS.
// NOTE(review): assumes blockDim.x == TEMPORAL_MAX_POOLING_THREADS -
// confirm at the launch site.
__global__ void cunn_TemporalMaxPooling_updateGradInputKernelAtomic(float *gradInput, float *gradOutput, float *indices, int input_w, int input_n, int output_w, int kW, int dW) {
// Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index
float *gradInput_data = gradInput + blockIdx.x * input_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW;
float *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
float *indices_data = indices + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
int feat = 0;
// guard: the last y-slab of threads may extend past output_w
if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) {
// For all features
for (feat = 0; feat < input_n; ++feat) {
atomicAdd(&gradInput_data[(int)indices_data[feat] * input_n + feat], gradOutput_data[feat]);
}
}
} |
9,634 | /*
* Just how many cuda enabled devices on this machine?
* Also, what are their properties?
*
* Note - EVERY cuda call returns an error value. While
* this is vital in real code, it gets in the way of
* tutorial code. I'm showing it here for cudaGetDeviceCount
* but will omit it for the rest of the tutorial.
*/
#include <stdio.h>
#include <stdlib.h>
/* Print the interesting fields of one device's cudaDeviceProp.
   (Per the file's header comment, error checking is deliberately
   omitted for the property query in this tutorial.) */
static void printDeviceProps(int dev) {
  cudaDeviceProp props;
  cudaGetDeviceProperties(&props,dev);
  printf("Device # %d\n",dev);
  printf(" name = %s\n",props.name);
  printf(" version = %d.%d\n",props.major,props.minor);
  printf(" total global memory = %ld\n",props.totalGlobalMem);
  printf(" shared Memory/Block = %ld\n",props.sharedMemPerBlock);
  printf(" registers/block = %d\n",props.regsPerBlock);
  printf(" warp size = %d\n",props.warpSize);
  printf(" Max threads/block = %d\n",props.maxThreadsPerBlock);
  printf(" Max Threads Dim = %d x %d x %d\n",props.maxThreadsDim[0],
props.maxThreadsDim[1],props.maxThreadsDim[2]);
  printf(" Max Grid Size = %d x %d x %d\n",props.maxGridSize[0],
props.maxGridSize[1],props.maxGridSize[2]);
  printf(" Multi-processor count = %d\n",props.multiProcessorCount);
  printf(" Max Threads/multiprocessor = %d\n",props.maxThreadsPerMultiProcessor);
}
/* Enumerate the CUDA devices on this machine and dump their properties. */
int main(int argc, char** argv) {
  int numberOfDevices;
  cudaError_t status = cudaGetDeviceCount(&numberOfDevices);
  if (status != cudaSuccess) {
    fprintf(stderr,"fail - cudaGetDeviceCount %d\n",status);
    exit(1);
  }
  printf("Number of cuda devices = %d\n",numberOfDevices);
  for (int dev = 0; dev < numberOfDevices; dev++)
    printDeviceProps(dev);
  return 0;
}
|
9,635 | #include <iostream>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
typedef unsigned long long llong_t;
/* Fill a device vector with 0..N-1 and reduce it on the GPU, ten times.
 * Demonstrates thrust::sequence / thrust::reduce replacing the commented
 * host-side loops. */
int main()
{
    const llong_t N = 1 << 25;
    thrust::device_vector<llong_t> vec(N);
    llong_t sum = 0;
    for (int i = 0; i < 10; i++) {
        // fill the array
        // for (llong_t i = 0; i < N; i++) vec[i] = i;
        thrust::sequence(vec.begin(), vec.end(), 0);
        // calculate the sum
        // for (llong_t i = 0; i < N; i++) sum += vec[i];
        // BUG FIX: thrust::reduce accumulates in the type of its init value.
        // The original passed a bare `0` (int), so the true sum
        // (N*(N-1)/2 ~= 5.6e14) overflowed 32-bit int. llong_t(0) keeps the
        // whole reduction in 64-bit arithmetic.
        sum = thrust::reduce(vec.begin(), vec.end(), llong_t(0));
        // dump result
        cout << "sum = " << sum << endl;
    }
    return 0;
}
|
9,636 | #include <stdio.h>
#include <ctype.h>
/* Load a neuron rule file (digits separated by runs of non-digit characters)
 * and pretty-print the rules grouped per neuron.
 *
 * Fixes over the original:
 *  - rules[] is zero-initialised: the scans below use `rules[k] != 0` as a
 *    terminator, which previously read indeterminate memory (UB).
 *  - the read loop is bounds-checked (a < 100) so long files cannot
 *    overflow rules[].
 *  - fopen() is checked and the file is closed.
 *  - atoi(&x) was UB (x is a lone char, not a NUL-terminated string);
 *    digits are converted with x - '0' instead, and classified with
 *    isdigit() rather than isalnum() (a letter fed to atoi produced 0,
 *    which falsely terminated the scans).
 *  - stray extra printf arguments removed.
 */
int main ( int argc, char *argv[ ] ){
    if ( argc != 2 )
        printf( "\n Usage \n%s filetoread \n", argv[ 0 ] );
    else {
        char x;
        /* Parsed rule stream: digit -> its value, any separator
           ('$', '!', ' ', newline, ...) -> -1. Unwritten slots stay 0. */
        int rules[ 100 ] = { 0 }; //length of rules that can be read
        int a = 0;
        char *filename = argv[ 1 ];
        FILE *ptr1 = fopen( filename, "r" );
        if ( ptr1 == NULL ) {
            fprintf( stderr, "cannot open %s\n", filename );
            return 1;
        }
        /* Drive the loop with fscanf's return value (testing feof() before
           the read is the classic off-by-one pattern). */
        while ( a < 100 && fscanf( ptr1, "%c", &x ) == 1 ) {
            if ( isdigit( (unsigned char)x ) )
                rules[ a ] = x - '0';
            else // ! = 33, $ = 36, ' ' = 32
                rules[ a ] = -1;
            a++;
        }
        fclose( ptr1 );
        //print the loaded rules
        // Rules on file: 2 2 $ 1 $ 1 2
        // Rules on load: |2 |-1 |2 |-1 |-1 |-1 |1 |-1 |-1 |-1 |1 |-1 |2 |-1 |
        printf( "\n" );
        int oneCnt = 1;
        int ruleCnt = 1;
        int neuron = 1;
        /* Find out how many rules are there: a run of three consecutive
           separators (-1) ends one rule. */
        for( int k = 0; k < a && rules[ k ] != 0; k++) {
            if ( rules[ k ] > 0 && oneCnt < 4 ){
                oneCnt = 1;
            }
            else if ( rules[ k ] < 0 && oneCnt < 3 ) {
                oneCnt = oneCnt + 1;
            }
            else if ( rules[ k ] < 0 && oneCnt == 3 ) {
                oneCnt = 1;
                ruleCnt = ruleCnt + 1;
            }
        }
        oneCnt = 1;
        printf( "\nThere are %d rules loaded\n", ruleCnt );
        int rulePrint = 1;
        printf( "Neuron %d rule/s:\n", neuron );
        /* Re-scan: print each rule value, starting a new heading whenever a
           rule boundary (three separators) is crossed. */
        for( int k = 0; rulePrint <= ruleCnt && k < a && rules[ k ] != 0; k++) {
            if ( rules[ k ] > 0 && oneCnt < 4 ){
                printf( " %d ", rules[ k ] );
                oneCnt = 1;
            }
            else if ( rules[ k ] < 0 && oneCnt < 3 ) {
                oneCnt = oneCnt + 1;
            }
            else if ( rules[ k ] < 0 && oneCnt == 3 ) {
                neuron = neuron + 1;
                printf( "\nNeuron %d rule/s:\n", neuron );
                oneCnt = 1;
                rulePrint = rulePrint + 1;
            }
        }
        printf( "\n" );
    }
    return 0;
}
|
9,637 | __global__ void _abs2_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi*xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void abs2_32(int n, float *x, float *y) {
_abs2_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _abs2_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi*xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void abs2_64(int n, double *x, double *y) {
_abs2_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _abs_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi<0?-xi:xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void abs_32(int n, float *x, float *y) {
_abs_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _abs_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi<0?-xi:xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void abs_64(int n, double *x, double *y) {
_abs_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _htanh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi<-1?-1 : (xi > 1 ? 1 :xi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void htanh_32(int n, float *x, float *y) {
_htanh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _htanh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi<-1?-1 : (xi > 1 ? 1 :xi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void htanh_64(int n, double *x, double *y) {
_htanh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _acos_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = acos(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void acos_32(int n, float *x, float *y) {
_acos_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _acos_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = acos(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void acos_64(int n, double *x, double *y) {
_acos_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _acosh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = acosh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void acosh_32(int n, float *x, float *y) {
_acosh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _acosh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = acosh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void acosh_64(int n, double *x, double *y) {
_acosh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _asin_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = asin(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void asin_32(int n, float *x, float *y) {
_asin_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _asin_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = asin(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void asin_64(int n, double *x, double *y) {
_asin_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _asinh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = asinh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void asinh_32(int n, float *x, float *y) {
_asinh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _asinh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = asinh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void asinh_64(int n, double *x, double *y) {
_asinh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _atan_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = atan(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void atan_32(int n, float *x, float *y) {
_atan_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _atan_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = atan(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void atan_64(int n, double *x, double *y) {
_atan_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _atanh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = atanh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void atanh_32(int n, float *x, float *y) {
_atanh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _atanh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = atanh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void atanh_64(int n, double *x, double *y) {
_atanh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cbrt_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = cbrt(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cbrt_32(int n, float *x, float *y) {
_cbrt_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cbrt_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = cbrt(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cbrt_64(int n, double *x, double *y) {
_cbrt_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _ceil_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = ceil(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void ceil_32(int n, float *x, float *y) {
_ceil_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _ceil_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = ceil(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void ceil_64(int n, double *x, double *y) {
_ceil_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cos_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = cos(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cos_32(int n, float *x, float *y) {
_cos_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cos_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = cos(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cos_64(int n, double *x, double *y) {
_cos_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cosh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = cosh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cosh_32(int n, float *x, float *y) {
_cosh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cosh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = cosh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cosh_64(int n, double *x, double *y) {
_cosh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cospi_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = cospi(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cospi_32(int n, float *x, float *y) {
_cospi_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _cospi_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = cospi(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void cospi_64(int n, double *x, double *y) {
_cospi_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erf_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = erf(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erf_32(int n, float *x, float *y) {
_erf_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erf_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = erf(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erf_64(int n, double *x, double *y) {
_erf_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfc_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = erfc(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfc_32(int n, float *x, float *y) {
_erfc_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfc_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = erfc(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfc_64(int n, double *x, double *y) {
_erfc_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfcinv_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = erfcinv(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfcinv_32(int n, float *x, float *y) {
_erfcinv_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfcinv_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = erfcinv(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfcinv_64(int n, double *x, double *y) {
_erfcinv_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfcx_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = erfcx(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfcx_32(int n, float *x, float *y) {
_erfcx_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfcx_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = erfcx(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfcx_64(int n, double *x, double *y) {
_erfcx_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfinv_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = erfinv(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfinv_32(int n, float *x, float *y) {
_erfinv_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _erfinv_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = erfinv(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void erfinv_64(int n, double *x, double *y) {
_erfinv_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = exp(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp_32(int n, float *x, float *y) {
_exp_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = exp(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp_64(int n, double *x, double *y) {
_exp_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp10_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = exp10(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp10_32(int n, float *x, float *y) {
_exp10_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp10_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = exp10(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp10_64(int n, double *x, double *y) {
_exp10_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp2_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = exp2(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp2_32(int n, float *x, float *y) {
_exp2_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _exp2_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = exp2(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void exp2_64(int n, double *x, double *y) {
_exp2_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _expm1_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = expm1(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void expm1_32(int n, float *x, float *y) {
_expm1_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _expm1_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = expm1(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void expm1_64(int n, double *x, double *y) {
_expm1_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _floor_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = floor(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void floor_32(int n, float *x, float *y) {
_floor_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _floor_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = floor(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void floor_64(int n, double *x, double *y) {
_floor_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _invx_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = 1/xi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void invx_32(int n, float *x, float *y) {
_invx_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _invx_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = 1/xi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void invx_64(int n, double *x, double *y) {
_invx_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = log(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log_32(int n, float *x, float *y) {
_log_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = log(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log_64(int n, double *x, double *y) {
_log_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log10_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = log10(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log10_32(int n, float *x, float *y) {
_log10_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log10_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = log10(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log10_64(int n, double *x, double *y) {
_log10_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log1p_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = log1p(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log1p_32(int n, float *x, float *y) {
_log1p_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log1p_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = log1p(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log1p_64(int n, double *x, double *y) {
_log1p_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log2_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = log2(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log2_32(int n, float *x, float *y) {
_log2_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _log2_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = log2(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void log2_64(int n, double *x, double *y) {
_log2_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _neg_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = -xi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void neg_32(int n, float *x, float *y) {
_neg_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _neg_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = -xi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void neg_64(int n, double *x, double *y) {
_neg_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _relu_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi>0?xi:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void relu_32(int n, float *x, float *y) {
_relu_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _relu_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi>0?xi:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void relu_64(int n, double *x, double *y) {
_relu_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _lzorelu_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi<1e-6?1e-6:xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void lzorelu_32(int n, float *x, float *y) {
_lzorelu_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _lzorelu_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi<1e-6?1e-6:xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void lzorelu_64(int n, double *x, double *y) {
_lzorelu_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _round_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = round(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void round_32(int n, float *x, float *y) {
_round_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _round_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = round(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void round_64(int n, double *x, double *y) {
_round_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sigm_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi>=0?1/(1+exp(-xi)):(exp(xi)/(1+exp(xi))));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sigm_32(int n, float *x, float *y) {
_sigm_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sigm_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi>=0?1/(1+exp(-xi)):(exp(xi)/(1+exp(xi))));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sigm_64(int n, double *x, double *y) {
_sigm_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sign_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = (xi>0?1:xi<0?-1:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sign_32(int n, float *x, float *y) {
_sign_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sign_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = (xi>0?1:xi<0?-1:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sign_64(int n, double *x, double *y) {
_sign_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sin_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = sin(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sin_32(int n, float *x, float *y) {
_sin_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sin_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = sin(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sin_64(int n, double *x, double *y) {
_sin_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sinh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = sinh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sinh_32(int n, float *x, float *y) {
_sinh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sinh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = sinh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sinh_64(int n, double *x, double *y) {
_sinh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sinpi_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = sinpi(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sinpi_32(int n, float *x, float *y) {
_sinpi_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sinpi_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = sinpi(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sinpi_64(int n, double *x, double *y) {
_sinpi_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sqrt_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = sqrt(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sqrt_32(int n, float *x, float *y) {
_sqrt_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _sqrt_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = sqrt(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sqrt_64(int n, double *x, double *y) {
_sqrt_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _tan_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = tan(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tan_32(int n, float *x, float *y) {
_tan_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _tan_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = tan(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tan_64(int n, double *x, double *y) {
_tan_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _tanh_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = tanh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tanh_32(int n, float *x, float *y) {
_tanh_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _tanh_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = tanh(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tanh_64(int n, double *x, double *y) {
_tanh_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _trunc_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = trunc(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void trunc_32(int n, float *x, float *y) {
_trunc_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
__global__ void _trunc_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = trunc(xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void trunc_64(int n, double *x, double *y) {
_trunc_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Standard normal PDF: y = exp(-x^2/2) / sqrt(2*pi); 0.3989423 ~ 1/sqrt(2*pi).
// NOTE(review): the double literals (0.5, 0.3989423) promote the arithmetic
// to double inside this float kernel; harmless for accuracy, slower on
// consumer GPUs.
__global__ void _G_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = exp(-(xi*xi)*0.5)*0.3989423;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void G_32(int n, float *x, float *y) {
_G_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Standard normal PDF (double precision): y = exp(-x^2/2) / sqrt(2*pi);
// 0.3989422804014327 = 1/sqrt(2*pi) to full double precision.
__global__ void _G_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = exp(-(xi*xi)*0.5) *0.3989422804014327;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void G_64(int n, double *x, double *y) {
_G_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Upper-tail standard normal CDF: H(x) = 0.5 * erfc(x / sqrt(2));
// 0.70710677 ~ 1/sqrt(2). Grid-stride loop.
__global__ void _H_32(int n, float *x, float *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi = x[i];
y[i] = 0.5 * erfc(xi *0.70710677);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void H_32(int n, float *x, float *y) {
_H_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Upper-tail standard normal CDF (double precision): H(x) = 0.5*erfc(x/sqrt(2));
// 0.7071067811865475 = 1/sqrt(2) to full double precision.
__global__ void _H_64(int n, double *x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi = x[i];
y[i] = 0.5 * erfc(xi*0.7071067811865475);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void H_64(int n, double *x, double *y) {
_H_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Ratio G(x)/H(x) of the standard normal PDF to its upper-tail CDF
// (the inverse Mills ratio), single precision.
// For xi > 30 erfc underflows, so an asymptotic series in 1/xi^2 is used;
// otherwise the direct ratio sqrt(2/pi)*exp(-xi^2/2)/erfc(xi/sqrt(2)),
// with 0.7978846 ~ sqrt(2/pi) and 0.70710677 ~ 1/sqrt(2).
// Fix: float literals and float intrinsics (expf/erfcf) — the original
// double literals silently promoted the whole expression to double math
// inside a float kernel.
__global__ void _GH_32(int n, float *x, float *y) {
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    float xi = x[i];
    if (xi > 30.0f) {
      float r = 1.0f / (xi * xi);  // expansion variable 1/xi^2
      y[i] = xi + 1.0f / xi *
             (1.0f - 2.0f * r * (1.0f - 5.0f * r * (1.0f - 7.4f * r)));
    } else {
      y[i] = 0.7978846f * expf(-(xi * xi) * 0.5f) / erfcf(xi * 0.70710677f);
    }
  }
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void GH_32(int n, float *x, float *y) {
  _GH_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Ratio G(x)/H(x) of the standard normal PDF to its upper-tail CDF
// (the inverse Mills ratio), double precision.
// For xi > 30 erfc underflows, so an asymptotic series in 1/xi^2 is used.
// Fix: the original used truncated constants (1.2533, 1.4142) — far below
// double precision and inconsistent with the sibling _64 kernels (which
// use 0.7071067811865475). Replaced with full-precision values:
// 1.2533141373155003 = sqrt(pi/2), 0.7071067811865475 = 1/sqrt(2).
__global__ void _GH_64(int n, double *x, double *y) {
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    double xi = x[i];
    if (xi > 30) {
      double r = 1 / (xi * xi);  // expansion variable 1/xi^2
      y[i] = xi + 1 / xi * (1 - 2 * r * (1 - 5 * r * (1 - 7.4 * r)));
    } else {
      y[i] = exp(-(xi * xi) * 0.5) /
             (1.2533141373155003 * erfc(xi * 0.7071067811865475));
    }
  }
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void GH_64(int n, double *x, double *y) {
  _GH_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// y = atanh(2*H(x) - 1), H(x) = 0.5*erfc(x/sqrt(2)) the upper-tail normal
// CDF, i.e. y = atanh(-erf(x/sqrt(2))). For |xi| > 6 erfc saturates and
// atanh diverges, so an asymptotic expansion in 1/xi^2 is used; the sign
// flips via `antisg` because the function is odd.
// Fixes vs. original:
//  - fabsf() instead of abs(): abs can bind to the int overload and
//    silently truncate the argument if no float overload is in scope;
//  - float literals and float intrinsics (atanhf/erfcf/logf), so the
//    kernel no longer promotes to double arithmetic;
//  - dropped the trailing __syncthreads(): no shared memory is used, so
//    the barrier had no effect.
__global__ void _atanh2Hm1_32(int n, float *x, float *y) {
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    float xi = x[i];
    if (fabsf(xi) > 6.0f) {
      float x2 = xi * xi;
      float x4 = x2 * x2;
      float x6 = x4 * x2;
      float x8 = x4 * x4;
      float x10 = x6 * x4;
      float antisg = (xi >= 0.0f) ? -1.0f : 1.0f;
      y[i] = antisg * (408.1f / x10 - 44.125f / x8 + 37.0f / (6.0f * x6)
                       - 1.25f / x4 + 0.5f / x2 + 0.25f * x2 + 0.459469f
                       + 0.25f * logf(x2));
    } else {
      y[i] = atanhf(erfcf(0.70710677f * xi) - 1.0f);
    }
  }
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void atanh2Hm1_32(int n, float *x, float *y) {
  _atanh2Hm1_32<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// y = atanh(2*H(x) - 1), H(x) = 0.5*erfc(x/sqrt(2)) the upper-tail normal
// CDF, i.e. y = atanh(-erf(x/sqrt(2))). For |xi| > 6 erfc saturates and
// atanh diverges, so an asymptotic expansion in 1/xi^2 is used; the sign
// flips via `antisg` because the function is odd.
// Fixes vs. original:
//  - fabs() instead of abs(): abs can bind to the int overload and
//    silently truncate the argument if no double overload is in scope;
//  - dropped the trailing __syncthreads(): no shared memory is used, so
//    the barrier had no effect.
__global__ void _atanh2Hm1_64(int n, double *x, double *y) {
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    double xi = x[i];
    if (fabs(xi) > 6) {
      double x2 = xi * xi;
      double x4 = x2 * x2;
      double x6 = x4 * x2;
      double x8 = x4 * x4;
      double x10 = x6 * x4;
      double antisg = (xi >= 0) ? -1.0 : 1.0;
      y[i] = antisg * (408.1 / x10 - 44.125 / x8 + 37 / (6 * x6)
                       - 1.25 / x4 + 0.5 / x2 + 0.25 * x2 + 0.459469
                       + 0.25 * log(x2));
    } else {
      y[i] = atanh(erfc(0.7071067811865475 * xi) - 1.0);
    }
  }
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 128x128 launch on the default stream.
void atanh2Hm1_64(int n, double *x, double *y) {
  _atanh2Hm1_64<<<128,128>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Fill y[0..n) with the scalar x (single precision); grid-stride loop.
__global__ void _fill_32(int n, float x, float *y) {
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n;
       idx += blockDim.x * gridDim.x) {
    y[idx] = x;
  }
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void fill_32(int n, float x, float *y) {
  _fill_32<<<256,256>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Fill y[0..n) with the scalar x (double precision); grid-stride loop.
__global__ void _fill_64(int n, double x, double *y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
y[i] = x;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void fill_64(int n, double x, double *y) {
_fill_64<<<256,256>>>(n,x,y);
}
#ifdef __cplusplus
}
#endif
// Fill an nrows x ncols column-major float matrix with the scalar x, where
// consecutive columns are `incy` elements apart (leading dimension incy).
// Grid-stride flat loop over all nrows*ncols elements; terminates once the
// derived column index runs past ncols.
// NOTE(review): assumes nrows > 0 (otherwise i % nrows divides by zero)
// and nrows*ncols fits in int — confirm at call sites.
__global__ void _xfill_32(int nrows, int ncols, float x, float *y, int incy) {
int row, col, yidx;
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (1) {
row = i % nrows;
col = i / nrows;
if (col >= ncols) break;
yidx = row + col * incy;
y[yidx] = x;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void xfill_32(int nrows, int ncols, float x, float *y, int incy) {
_xfill_32<<<256,256>>>(nrows, ncols, x, y, incy);
}
#ifdef __cplusplus
}
#endif
// Fill an nrows x ncols column-major double matrix with the scalar x, where
// consecutive columns are `incy` elements apart (leading dimension incy).
// Grid-stride flat loop; terminates once the column index runs past ncols.
// NOTE(review): assumes nrows > 0 (otherwise i % nrows divides by zero)
// and nrows*ncols fits in int — confirm at call sites.
__global__ void _xfill_64(int nrows, int ncols, double x, double *y, int incy) {
int row, col, yidx;
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (1) {
row = i % nrows;
col = i / nrows;
if (col >= ncols) break;
yidx = row + col * incy;
y[yidx] = x;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void xfill_64(int nrows, int ncols, double x, double *y, int incy) {
_xfill_64<<<256,256>>>(nrows, ncols, x, y, incy);
}
#ifdef __cplusplus
}
#endif
// Copy an nrows x ncols column-major byte matrix from x to y, where incx /
// incy are the leading dimensions (column strides) of the two buffers, in
// bytes (elements are char; the host wrapper casts arbitrary pointers in).
// Grid-stride flat loop; terminates once the column index runs past ncols.
__global__ void _xcopy(int nrows, int ncols, const char *x, int incx, char *y, int incy) {
int row, col, xidx, yidx;
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (1) {
row = i % nrows;
col = i / nrows;
if (col >= ncols) break;
xidx = row + col * incx;
yidx = row + col * incy;
y[yidx] = x[xidx];
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Type-erased C entry point; fixed 256x256 launch on the default stream.
void xcopy(int nrows, int ncols, const void *x, int incx, void *y, int incy) {
_xcopy<<<256,256>>>(nrows,ncols,(char*)x,incx,(char*)y,incy);
}
#ifdef __cplusplus
}
#endif
// 3-D axis permutation y = permutedims(x, (1,3,2)) for column-major
// (dim-1-fastest) float arrays. Grid-stride over output elements: (i,j,k)
// is the output coordinate recovered from the flat index v, srcIndex the
// matching column-major offset in x.
// Assumes (dimy1,dimy2,dimy3) == (dimx1,dimx3,dimx2) — TODO confirm callers.
__global__ void _permutedims3D_1_3_2_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = i + dimx1*k + dimx1*dimx2*j;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_1_3_2_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_1_3_2_32_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of the (1,3,2) axis permutation; see the float
// version above. Assumes (dimy1,dimy2,dimy3) == (dimx1,dimx3,dimx2) —
// TODO confirm callers.
__global__ void _permutedims3D_1_3_2_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = i + dimx1*k + dimx1*dimx2*j;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_1_3_2_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_1_3_2_64_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// 3-D axis permutation y = permutedims(x, (2,1,3)) for column-major float
// arrays; grid-stride over output elements.
// Assumes (dimy1,dimy2,dimy3) == (dimx2,dimx1,dimx3) — TODO confirm callers.
__global__ void _permutedims3D_2_1_3_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = j + dimx1*i + dimx1*dimx2*k;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_2_1_3_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_2_1_3_32_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of the (2,1,3) axis permutation; see the float
// version above. Assumes (dimy1,dimy2,dimy3) == (dimx2,dimx1,dimx3) —
// TODO confirm callers.
__global__ void _permutedims3D_2_1_3_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = j + dimx1*i + dimx1*dimx2*k;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_2_1_3_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_2_1_3_64_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// 3-D axis permutation y = permutedims(x, (2,3,1)) for column-major float
// arrays; grid-stride over output elements.
// Assumes (dimy1,dimy2,dimy3) == (dimx2,dimx3,dimx1) — TODO confirm callers.
__global__ void _permutedims3D_2_3_1_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = k + dimx1*i + dimx1*dimx2*j;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_2_3_1_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_2_3_1_32_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of the (2,3,1) axis permutation; see the float
// version above. Assumes (dimy1,dimy2,dimy3) == (dimx2,dimx3,dimx1) —
// TODO confirm callers.
__global__ void _permutedims3D_2_3_1_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = k + dimx1*i + dimx1*dimx2*j;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_2_3_1_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_2_3_1_64_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// 3-D axis permutation y = permutedims(x, (3,1,2)) for column-major float
// arrays; grid-stride over output elements.
// Assumes (dimy1,dimy2,dimy3) == (dimx3,dimx1,dimx2) — TODO confirm callers.
__global__ void _permutedims3D_3_1_2_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = j + dimx1*k + dimx1*dimx2*i;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_3_1_2_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_3_1_2_32_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of the (3,1,2) axis permutation; see the float
// version above. Assumes (dimy1,dimy2,dimy3) == (dimx3,dimx1,dimx2) —
// TODO confirm callers.
__global__ void _permutedims3D_3_1_2_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = j + dimx1*k + dimx1*dimx2*i;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_3_1_2_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_3_1_2_64_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// 3-D axis permutation y = permutedims(x, (3,2,1)) (full reversal) for
// column-major float arrays; grid-stride over output elements.
// Assumes (dimy1,dimy2,dimy3) == (dimx3,dimx2,dimx1) — TODO confirm callers.
__global__ void _permutedims3D_3_2_1_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = k + dimx1*j + dimx1*dimx2*i;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_3_2_1_32_44(float* x, int dimx1, int dimx2, int dimx3, float* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_3_2_1_32_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of the (3,2,1) axis permutation; see the float
// version above. Assumes (dimy1,dimy2,dimy3) == (dimx3,dimx2,dimx1) —
// TODO confirm callers.
__global__ void _permutedims3D_3_2_1_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
for (int v = threadIdx.x + blockIdx.x * blockDim.x; v < dimy1*dimy2*dimy3; v += blockDim.x * gridDim.x) {
int i = v % dimy1;
int j = ((v - i) / dimy1) % dimy2;
int k = ((v - j * dimy1 - i) / (dimy1 * dimy2)) % dimy3;
int srcIndex = k + dimx1*j + dimx1*dimx2*i;
y[v] = x[srcIndex];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// C entry point; fixed 256x256 launch on the default stream.
void permutedims3D_3_2_1_64_44(double* x, int dimx1, int dimx2, int dimx3, double* y, int dimy1, int dimy2, int dimy3) {
_permutedims3D_3_2_1_64_44<<<256,256>>>(x,dimx1,dimx2,dimx3,y,dimy1,dimy2,dimy3);
}
#ifdef __cplusplus
}
#endif
|
9,638 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// 1-D "same" convolution of I (length *w) with mask M (length *mw), zero
// padding at the edges. One thread per output element; expected launch is
// <<<1, *w>>>.
// Fixes: accumulate in int — I, M and O are all int arrays, so the
// original float accumulator added a pointless int->float->int round trip
// and loses precision for sums above 2^24; added a bounds guard so an
// over-sized launch cannot write past O.
__global__ void convolution(int *I, int *M, int *O,int * w,int *mw){
  int WIDTH = * w;
  int MASK_WIDTH = *mw;
  int i = threadIdx.x;
  if (i >= WIDTH) return;          // guard against over-sized launches
  int acc = 0;
  int si = i - (MASK_WIDTH / 2);   // left edge of the mask window
  for (int j = 0; j < MASK_WIDTH; j++) {
    if (si + j >= 0 && si + j < WIDTH) {  // out-of-range taps act as zeros
      acc += I[si + j] * M[j];
    }
  }
  O[i] = acc;
}
// Reads an input array (length n) and a mask (length m) from stdin, runs
// the 1-D convolution kernel with one thread per output element, and
// prints the result.
int main(int argc, char const *argv[]){
  int n,m,*d_w,*d_mw;
  printf("Enter the value n and m\n");
  scanf("%d",&n);
  scanf("%d",&m);
  int input[n],output[n],mask[m],*d_i,*d_m,*d_o;   // VLAs on the host stack
  printf("Enter elements in 1st input array:\n");
  for(int i = 0;i<n;i++){
    scanf("%d",&input[i]);
  }
  printf("Enter elements in 2nd input array:\n");
  for(int i = 0;i<m;i++){
    scanf("%d",&mask[i]);
  }
  cudaMalloc((void **)&d_i,sizeof(int)*n);
  cudaMalloc((void **)&d_m,sizeof(int)*m);
  cudaMalloc((void **)&d_o,sizeof(int)*n);
  cudaMalloc((void **)&d_w,sizeof(int));
  cudaMalloc((void **)&d_mw,sizeof(int));
  cudaMemcpy(d_i,input,n*sizeof(int),cudaMemcpyHostToDevice);
  cudaMemcpy(d_m,mask,m*sizeof(int),cudaMemcpyHostToDevice);
  cudaMemcpy(d_w,&n,sizeof(int),cudaMemcpyHostToDevice);
  cudaMemcpy(d_mw,&m,sizeof(int),cudaMemcpyHostToDevice);
  convolution<<<1,n>>>(d_i,d_m,d_o,d_w,d_mw);
  // Blocking copy also synchronizes with the kernel before we read output.
  cudaMemcpy(output,d_o,n*sizeof(int),cudaMemcpyDeviceToHost);
  for(int i = 0;i<n;i++){
    printf("%d ",output[i]);
  }
  // Fix: release all device allocations (d_w and d_mw were leaked).
  cudaFree(d_i);
  cudaFree(d_m);
  cudaFree(d_o);
  cudaFree(d_w);
  cudaFree(d_mw);
  return 0;
}
|
9,639 | #include "includes.h"
__global__ void cudaNoConversion_kernel(float * data, float * tickOutputsTraces, float * tickOutputsTracesLearning, float scaling, unsigned int inputDimX, unsigned int inputDimY, unsigned int inputDimZ)
{
const unsigned int inputSize = inputDimX * inputDimY * inputDimZ;
const unsigned int batchOffset = blockIdx.x * inputSize;
for (unsigned int idx = threadIdx.x; idx < inputSize; idx += blockDim.x) {
float value = data[idx + batchOffset];
tickOutputsTraces[idx + batchOffset] = scaling*value;
tickOutputsTracesLearning[idx + batchOffset] += scaling*value;
}
} |
9,640 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda.h>
#define ITERATION 500
#define BLKSIZE 512
typedef unsigned long long bint;
// Build an (n+1) x (n+1) grid stored row-major in a flat float array:
// interior cells 0, all four borders 80, then a hot segment of 150 at
// columns 10..30 of the top row (the last assignment overrides the border
// value there). Caller owns the returned calloc'd buffer.
// NOTE(review): the hot segment assumes n >= 30; for smaller n only part
// of it fits on the top row — confirm intended.
float * allocate(bint n){
bint size = (n+1)*(n+1);
float *m = (float *)calloc(size, sizeof(float));
bint i;
for (i=0; i<n+1; i++){
m[i] = 80;
m[size-1-i] = 80;
m[(n+1)*i] = 80;
m[(n+1)*(i+1)-1] = 80;
m[i] = (i >=10 && i<=30) ? 150 : m[i];
}
return m;
}
// Arithmetic mean of all dim*dim entries of the flat matrix m.
float avg(float *m, bint dim){
    bint size = dim*dim;
    float total = 0;
    for (bint k = 0; k < size; k++) {
        total += m[k];
    }
    return total/size;
}
// One Jacobi relaxation step on a dim x dim grid, reading s and writing d.
// tid enumerates only interior cells: the +dim+1 offset skips the first
// row and column, and 2*(tid/(dim-2)) skips the two boundary columns at
// each row wrap; the guard excludes the last row. Each interior cell
// becomes the average of its four von Neumann neighbours.
__global__ void simulateKernel(float *s, float *d, bint dim){
//dim is one side length of matrix
bint tid = threadIdx.x + blockIdx.x * blockDim.x;
bint i = tid + dim + 1 + 2*(tid/(dim-2));
if (i < dim*(dim-1)-1)
d[i] = (s[i-1] + s[i+1] + s[i-dim] + s[i+dim]) / 4;
}
// Heat-diffusion driver: builds an (n+1)x(n+1) grid with fixed borders,
// then runs ITERATION Jacobi steps on the GPU using double buffering
// (a <-> b ping-pong, two steps per loop iteration) and copies the final
// state back to the host.
int main(int argc, char *argv[]){
if (argc < 2){
printf("Please indicate matrix size.\n");
exit(0);
}
bint n = atoi(argv[1]);
float *m = allocate(n);
//float mean = avg(m, n+1);
//printf("%f===>",mean);
// allocation and copy to DEVICE
float * a, *b;
bint mem = (n+1)*(n+1)*sizeof(float);
cudaMalloc((void **)&a, mem);
cudaMalloc((void **)&b, mem);
cudaMemcpy(a, m, mem, cudaMemcpyHostToDevice);
cudaMemcpy(b, m, mem, cudaMemcpyHostToDevice);
// call kernel function
// Ceil-divide the cell count by the block size.
bint gridSize = ((n+1)*(n+1) % BLKSIZE == 0)? (n+1)*(n+1)/BLKSIZE : (n+1)*(n+1)/BLKSIZE+1;
int i;
for (i=0; i<ITERATION/2; i++){
simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1);
simulateKernel<<<gridSize, BLKSIZE>>>(b, a, n+1);
}
// Odd iteration counts need one extra step; the final state then lives
// in b instead of a.
if (ITERATION%2 !=0){
simulateKernel<<<gridSize, BLKSIZE>>>(a, b, n+1);
cudaMemcpy(m, b, mem, cudaMemcpyDeviceToHost);
}
else{
cudaMemcpy(m, a, mem, cudaMemcpyDeviceToHost);
}
//mean = avg(m, n+1);
//printf("%f\n", mean);
free(m);
cudaFree(a);
cudaFree(b);
return 0;
}
|
9,641 | #include <iostream>
#include <string.h>
#include <stdio.h>
#include <assert.h>
using namespace std;
extern "C"
int initGPU(int mpi_proc_id)
{
    // Bind this MPI rank to a GPU, round-robin over the visible devices,
    // and return the chosen device id. Aborts via assert on any failure.
    int n_gpu = 0;
    cudaError_t status = cudaGetDeviceCount(&n_gpu);
    assert(status == cudaSuccess);
    assert(n_gpu > 0);

    const int devid = mpi_proc_id % n_gpu;
    status = cudaSetDevice(devid);
    assert(status == cudaSuccess);

    // Sanity check: the runtime must report the device we just selected.
    int active = -1;
    status = cudaGetDevice(&active);
    assert(status == cudaSuccess);
    assert(active == devid);
    return devid;
}
|
9,642 | int *test;
// Allocates a 100-int device scratch buffer into the file-scope pointer
// `test`; released by SR_kernel_end().
// NOTE(review): w, h, ww, hh are currently unused — the commented-out code
// suggests planned per-image RGB buffers; parameters kept for interface
// stability.
void SR_kernel_start(int w, int h, int ww, int hh){
cudaMalloc((void**)&test, 100*sizeof(int));
/*
cudaMalloc((void**)&d_ansR, ww*hh*sizeof(int));
cudaMalloc((void**)&d_ansG, ww*hh*sizeof(int));
cudaMalloc((void**)&d_ansB, ww*hh*sizeof(int));
cudaFree(d_ansR);
cudaFree(d_ansG);
cudaFree(d_ansB);
*/
}
// Releases the device scratch buffer allocated by SR_kernel_start().
void SR_kernel_end(){
cudaFree(test);
}
|
9,643 | #include <cuda_runtime.h>
#include <stdio.h>
// Enumerates all CUDA devices and prints their key properties.
// Fix: cudaDeviceProp::totalGlobalMem and sharedMemPerBlock are size_t;
// printing them with %d truncated (and misaligned varargs on LP64).
// Use %zu for size_t fields.
int main() {
  printf("START\n");
  int device_count;
  cudaGetDeviceCount(&device_count);
  printf("Number of CUDA devices: %d\n", device_count);
  cudaDeviceProp props;
  for (int i = 0; i < device_count; i++) {
    cudaGetDeviceProperties(&props, i);
    printf("Info for device #%d:\n ", i);
    printf("\tName: %s\n", props.name);
    printf("\tGlobal Memory: %zu\n", props.totalGlobalMem);
    printf("\tShared Memory per Block: %zu\n", props.sharedMemPerBlock);
    printf("\tRegisters per Block: %d\n", props.regsPerBlock);
    printf("\tWarp Size: %d\n", props.warpSize);
    printf("\tMax Threads per Block: %d\n", props.maxThreadsPerBlock);
    printf("\tClock Rate: %d\n", props.clockRate);
    printf("\tMemory Clock Rate: %d\n", props.memoryClockRate);
    printf("\tMemory Bus Width: %d\n", props.memoryBusWidth);
    printf("\tSM Count: %d\n", props.multiProcessorCount);
    printf("\tThreads per SM: %d\n", props.maxThreadsPerMultiProcessor);
    printf("\tConcurrent Kernels: %s\n", props.concurrentKernels ? "Y":"N");
  }
  return 0;
}
|
9,644 | #include <time.h>
#include <math.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Generates a random symmetric N x N weight matrix (zero diagonal, weights
// in [1, 1000]) and writes it to "<N>.txt": first line N, then one row per
// line prefixed by its row index.
// Fixes: filename buffer was 10 bytes — "%d.txt" needs up to 10 digits +
// 4 chars + NUL, so large N overflowed the stack buffer; fopen result was
// unchecked; the N+1 row allocations were leaked (only the row-pointer
// array was freed).
int main(int argc, char *argv[]){
  srand(time(NULL));
  int N;
  int i,j;
  FILE *fp;
  printf("Input gen size : ");
  scanf("%d",&N);
  char filename[32];                 // large enough for any int + ".txt"
  sprintf(filename,"%d.txt", N);
  fp = fopen(filename,"w");
  if (fp == NULL) {                  // fail cleanly instead of crashing
    perror("fopen");
    return 1;
  }
  fprintf(fp,"%d\n",N);
  int **UPCluster;
  UPCluster = (int **)malloc((N+1)*sizeof(int*));
  for(i=0;i<(N+1);i++)
  {
    UPCluster[i] =(int*)malloc( N * sizeof(int));
  }
  for(i=0;i<N;i++)
  {
    for(j=0;j<N;j++)
    {
      UPCluster[i][j] = (rand()%1000)+1;
      if(i == j) UPCluster[i][j] = 0;                   // zero diagonal
      else if(i > j) UPCluster[i][j] = UPCluster[j][i]; // mirror -> symmetric
    }
  }
  for(i = 0; i < N; i++)
  {
    fprintf(fp,"%d ",i);
    for(j = 0; j < N; j++){
      fprintf(fp,"%d ",UPCluster[i][j]);
    }
    fprintf(fp,"\n");
  }
  fclose(fp);
  // Free every row before the row-pointer array.
  for(i=0;i<(N+1);i++)
    free(UPCluster[i]);
  free(UPCluster);
  return 0;
}
|
9,645 | #include "includes.h"
// Fills row `idx` of the N x N imaginary-part twiddle table for a DFT:
// wi[idx][i] = -Im(W^((idx*i) mod N)), where w stores the N complex roots
// of unity interleaved as (re, im) pairs. Row 0 and column 0 are zeroed
// (W^0 has zero imaginary part). One thread per row.
__global__ void twiddleImgKernel(float *wi, float *w, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, index;
if (idx < N) {
if (idx == 0) {
for (i = 0; i < N; i++)
wi[idx * N + i] = 0;
} else {
wi[idx * N + 0] = 0;
for (i = 1; i < N; i++) {
index = (idx * i) % N;
wi[idx * N + i] = (-1) * w[index * 2 + 1];
}
}
}
}
9,646 | #include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <sys/time.h>
#include <malloc.h>
#include <math.h>
#include <cuda_runtime.h>
#define BLOCKWIDTH 1024
// Allocate a zero-initialized vector of `length` ints. Aborts the process
// with a diagnostic on allocation failure (file convention).
int *allocateMemoryInt(int length) {
    int *vec = (int *)calloc(length, sizeof(int));
    if (vec == NULL) {
        fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
        exit(1);
    }
    return vec;
}
// Allocate an *uninitialized* vector of `length` doubles (unlike the int
// variant, contents are not zeroed). Aborts the process on failure.
double *allocateMemoryDouble(int length) {
    double *vec = (double *)malloc(sizeof(double) * length);
    if (vec == NULL) {
        fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
        exit(1);
    }
    return vec;
}
// One PageRank power-iteration step. Thread t owns node t:
//   new[t] = d * sum_{j -> t} old[j] / outdeg(j) + (1 - d) / nodes
// run_d[t] is the start offset of node t's in-edge list inside edges_d,
// in_d[t] its in-degree, out_d[j] the out-degree of source node j.
__global__ void CalcPageRank(int nodes, int edges, int *in_d, int *out_d,
int *run_d, int *edges_d, double *pagerank_old_d, double *pagerank_new_d) {
  int node_index = blockIdx.x * BLOCKWIDTH + threadIdx.x;
  if (node_index < nodes) {
    double sum = 0;
    double d = 0.85;                              // damping factor
    double jumpChance = (1 - d) * (1.0 / nodes);  // random-jump term
    int stopIdx = run_d[node_index] + in_d[node_index];
    int k;
    for (k = run_d[node_index]; k < stopIdx; k++) {
      int jk = edges_d[k];                        // source of the k-th in-edge
      sum += pagerank_old_d[jk] / out_d[jk];
    }
    pagerank_new_d[node_index] = sum * d + jumpChance;
  }
  __syncthreads();
  // Fix: the new->old copy-back was unguarded, so threads of the last
  // (partial) block wrote past the end of both arrays.
  // NOTE(review): __syncthreads() only orders threads within one block;
  // with more than one block, another block may still be reading
  // pagerank_old_d while this one overwrites it. A separate copy kernel
  // (or a host-side pointer swap between iterations) would make the
  // iteration fully race-free — confirm intent.
  if (node_index < nodes)
    pagerank_old_d[node_index] = pagerank_new_d[node_index];
}
// PageRank driver for a fixed 38-node / 156-edge graph read from stdin:
// first all edge source-ids (in-edge lists laid out contiguously per
// node), then per node "<id> <indeg> <outdeg>". Runs `iter` power
// iterations on the GPU and prints the final ranks.
// NOTE(review): pagerank_old is initialized to 1/n twice (the second loop
// is redundant), and the "Compute" timer also includes the read phase
// since `start` is never re-sampled — presumably both unintentional.
int main () {
int i = 0, j = 0, k = 0, run = 0, idx = 0;
int nodes = 38;
int edges = 156;
int iter = 38;
struct timeval stop, start;
int* indegree_count=allocateMemoryInt(nodes);
int* outdegree_count=allocateMemoryInt(nodes);
int* running_edge_indices=allocateMemoryInt(nodes);
int* edges_1D = allocateMemoryInt(edges);//node1:node2|node2->node1
double* pagerank_new;
double* pagerank_old;
if ((pagerank_new = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
if ((pagerank_old = (double *)malloc(nodes * sizeof(double))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
for (i = 0; i < nodes; i++) {
pagerank_old[i] = 1.0 / (double)nodes;
}
memset(pagerank_new, 0, nodes * sizeof(double));
// set starting values for pagerank values to 1/n
for (i = 0; i < nodes; i++)
pagerank_old[i] = 1.0 / (double)nodes;
gettimeofday(&start, NULL);
setvbuf(stdin, NULL, _IOFBF, edges);
//reads in edges
for (i = 0; i < edges; i++) {
scanf("%d\n", &j);
edges_1D[i] = j;
}
//reads in in-degrees, out-degrees, and computes running idx
for (i = 0; i < nodes; i++) {
scanf("%d %d %d\n", &idx, &j, &k);
indegree_count[idx] = j;
outdegree_count[idx] = k;
running_edge_indices[idx] = run;
run += j;
}
gettimeofday(&stop, NULL);
fprintf(stderr, "Read took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
// Begin Cuda Setup
int *in_d, *out_d, *run_d, *edges_d;
double *pagerank_new_d, *pagerank_old_d;
int node_size = nodes * sizeof(int);
int pr_size = nodes * sizeof(double);
int edges_size = edges * sizeof(int);
cudaMalloc(&in_d, node_size);
cudaMemcpy(in_d, indegree_count, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&out_d, node_size);
cudaMemcpy(out_d, outdegree_count, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&run_d, node_size);
cudaMemcpy(run_d, running_edge_indices, node_size, cudaMemcpyHostToDevice);
cudaMalloc(&edges_d, edges_size);
cudaMemcpy(edges_d, edges_1D, edges_size, cudaMemcpyHostToDevice);
cudaMalloc(&pagerank_old_d,pr_size);
cudaMemcpy(pagerank_old_d, pagerank_old, pr_size, cudaMemcpyHostToDevice);
cudaMalloc(&pagerank_new_d,pr_size);
//cudaMemcpy(pagerank_new_d, pagerank_new, pr_size, cudaMemcpyHostToDevice);
// Ceil-divide nodes by the block width.
int blocks = ceil((double)nodes/(double)BLOCKWIDTH);
dim3 dimGrid(blocks, 1, 1);
dim3 dimBlock(BLOCKWIDTH, 1, 1);
for(i=0; i < iter; i++) {
CalcPageRank<<<dimGrid, dimBlock>>>(nodes, edges, in_d, out_d, run_d, edges_d, pagerank_old_d, pagerank_new_d);
}
// Blocking copy also synchronizes with the last kernel launch.
cudaMemcpy(pagerank_old, pagerank_old_d, nodes * sizeof(double), cudaMemcpyDeviceToHost);
gettimeofday(&stop, NULL);
fprintf(stderr, "Compute took %lf seconds\n", (stop.tv_sec - start.tv_sec) +
((stop.tv_usec - start.tv_usec) / 1000000.0));
cudaFree(in_d);
cudaFree(out_d);
cudaFree(run_d);
cudaFree(edges_d);
cudaFree(pagerank_old_d);
cudaFree(pagerank_new_d);
for (i = 0; i < nodes; i++)
printf("%.15lf:%d,", pagerank_old[i], i);
free(indegree_count);
free(outdegree_count);
free(pagerank_new);
free(pagerank_old);
free(running_edge_indices);
free(edges_1D);
return 0;
}
|
9,647 | #include <stdio.h>
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob' Born
// ;
//}
//void process_error( const cudaError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
// Stub GPU initializer for the RTM propagation: currently only selects
// device 0 and reports success/failure on stderr; the buffer allocation
// (commented out) is still TODO. Expected array extents per the caller's
// comments: (nz, nx, nt) wavefields and (nz, nx) model slices.
extern "C" void rtm_gpu_init(int nt, int nz, int nx, int zrec,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
// float * Vx, float * Vz, float * sigmaxx, float * sigmazz, float * sigmaxz, //(nt, nx)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)
{
//set cuda devices and put all data onto gpu memory
cudaError_t cuda_ret;
cudaError_t err;
//Set Device
fprintf(stderr,"GPU init. \n");
cuda_ret = cudaSetDevice(0);
if(cuda_ret != cudaSuccess){
fprintf(stderr, "Failed to set the cuda device !\n");
}
else{
fprintf(stderr, "cuda device set OK\n");
}
// init data
//cudaMalloc(&g_,sizeof()*nx*nz*nt);
}
|
9,648 | /* task-5.cu */
#include <stdio.h>
// Each thread writes its "reversed" in-block index into its slot:
// array[global_id] = blockDim.x - threadIdx.x - 1.
__global__ void myHelloOnGPU(int *array){
// Position1
array[blockIdx.x * blockDim.x + threadIdx.x] = ( blockDim.x - threadIdx.x - 1);
}
// Launches myHelloOnGPU over N elements (N/4 blocks of N/4 threads = N
// threads for N = 16), copies the result back and prints it.
// Fix: both the host buffer and the device buffer were leaked.
int main(){
  int N = 16;
  int *cpuArray = (int*)malloc(sizeof(int)*N);
  int *gpuArray;
  cudaMalloc((void **)&gpuArray, sizeof(int)*N);
  // Position 2
  myHelloOnGPU<<<N/4, N/4>>>(gpuArray);
  // Blocking copy also synchronizes with the kernel.
  cudaMemcpy(cpuArray, gpuArray, sizeof(int)*N, cudaMemcpyDeviceToHost);
  for(int i=0; i<N; i++){
    printf("%d ", cpuArray[i]);
  }
  printf("\n");
  cudaFree(gpuArray);  // fix: release device allocation
  free(cpuArray);      // fix: release host allocation
  return 0;
}
|
9,649 | #include "includes.h"
// Element-wise complex division result = data_l / data_r for interleaved
// (re, im) float pairs; `total` is the number of complex elements.
// Standard formula: (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2+d^2).
// Each thread handles one complex pair, so idx is always even.
__global__ void same_num_channels_div_kernel(const float *data_l, const float *data_r, float *result, unsigned total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
if (idx / 2 < total) {
result[idx] = (data_l[idx] * data_r[idx] + data_l[idx + 1] * data_r[idx + 1]) /
(data_r[idx] * data_r[idx] + data_r[idx + 1] * data_r[idx + 1]);
result[idx + 1] = (data_l[idx + 1] * data_r[idx] - data_l[idx] * data_r[idx + 1]) /
(data_r[idx] * data_r[idx] + data_r[idx + 1] * data_r[idx + 1]);
}
}
9,650 | #include "includes.h"
// Backprop through graph max-pooling for one sample per block: scatters
// each cluster's output gradient back to the input position that produced
// the max (indices are 1-based, hence the -1). Dynamic shared memory must
// hold 2*nClusters floats (gradOutput copy followed by indices copy).
// A single thread performs the scatter serially to avoid write races when
// several clusters map to the same input element.
// NOTE(review): the serial loop reads gradOutput from *global* memory
// rather than the staged gradOutput_data copy, the first __syncthreads()
// (before any shared write) has no effect, and tidx == 1 rather than 0 is
// the designated scatter thread — the shared-memory staging appears
// largely vestigial; confirm before relying on it.
__global__ void cuda_graph_maxpool_bprop(float* gradInput, const float *gradOutput, const float* indices, const int nClusters, const int dim, const int nClustersPerThread) {
extern __shared__ float shared_mem[];
float* gradOutput_data = (float*)shared_mem;
float* indices_data = (float*)&gradOutput_data[nClusters];
const int tidx = threadIdx.x;
gradInput += blockIdx.x * dim;
gradOutput += blockIdx.x * nClusters;
indices += blockIdx.x * nClusters;
__syncthreads();
for (int i = 0; i < nClustersPerThread; ++i) {
int idx = tidx + i*blockDim.x;
if (idx < nClusters) {
gradOutput_data[idx] = gradOutput[idx];
indices_data[idx] = indices[idx];
}
}
__syncthreads();
//ouch...
if (tidx == 1) {
for (int i = 0; i < nClusters; ++i) {
gradInput[(int)indices_data[i]-1] += gradOutput[i];
}
}
//gradInput[(int)indices_data[tidx]-1] = gradOutput[tidx];
}
9,651 | #include <stdio.h>
// Module-scope handles to the device buffers managed by rtm_gpu_init.
// Fix: dropped the __device__ qualifier. These variables are only ever
// touched from host code (cudaMalloc/cudaMemcpy/cudaFree below); direct
// host access to __device__ symbols is not legal CUDA — at best it hits
// the symbol's host-side shadow. Plain host globals holding device
// pointers are what the code actually uses.
float *g_Vx0;
float *g_Vz0;
float *g_sigmaxx0;
float *g_sigmazz0;
float *g_sigmaxz0;
float *g_m1_x;
float *g_m1_z;
float *g_aux_m2_c;
float *g_aux_m3_c;
float *g_aux_m2m3_c;
//void setup_cuda(int ngpus, int argc, char **argv){
//insert from Bob' Born
// ;
//}
//void process_error( const cudaError_t &error, char *string=0, bool verbose=false ){
//insert from Bob's Born
// ;
//}
// Round-trip smoke test for the RTM GPU buffers: selects device 0,
// allocates device copies of all wavefield/model arrays, copies host ->
// device, copies back, and releases everything.
// Fix: cudaFree takes the device pointer itself; the original passed the
// *address of* each pointer variable (cudaFree(&g_Vx0)), which frees an
// invalid pointer and leaks every buffer.
extern "C" void rtm_gpu_init(int nt, int nz, int nx,
float * Vx0, float * Vz0, float * sigmaxx0, float * sigmazz0, float * sigmaxz0, //(nz, nx, nt)
// float * Vx, float * Vz, float * sigmaxx, float * sigmazz, float * sigmaxz, //(nt, nx)
float * m1_x,float * m1_z,float * aux_m2_c, float * aux_m3_c, float * aux_m2m3_c)
{
//set cuda devices and put all data onto gpu memory
cudaError_t cuda_ret;
cudaError_t err;
//Set Device
fprintf(stderr,"GPU init. \n");
cuda_ret = cudaSetDevice(0);
if(cuda_ret != cudaSuccess){
fprintf(stderr, "Failed to Set The cuda Device !\n");
}
else{
fprintf(stderr, "GPU Device Set OK\n");
}
// data init: (nz, nx, nt) wavefields and (nz, nx) model slices
cudaMalloc(&g_Vx0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_Vz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxx0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmazz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_sigmaxz0,sizeof(float)*nx*nz*nt);
cudaMalloc(&g_m1_x,sizeof(float)*nx*nz);
cudaMalloc(&g_m1_z,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m3_c,sizeof(float)*nx*nz);
cudaMalloc(&g_aux_m2m3_c,sizeof(float)*nx*nz);
fprintf(stderr,"GPU Data Init OK\n");
// data copy
cudaMemcpy(g_Vx0, Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_Vz0, Vz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxx0, sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmaxz0, sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_sigmazz0, sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_x, m1_x, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_m1_z, m1_z, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2_c, aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m3_c, aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
cudaMemcpy(g_aux_m2m3_c, aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyHostToDevice);
fprintf(stderr,"Data Copy To GPU OK\n");
// data copy back from GPU mem
cudaMemcpy(Vx0, g_Vx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy( Vz0, g_Vz0,sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxx0, g_sigmaxx0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmaxz0, g_sigmaxz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(sigmazz0, g_sigmazz0, sizeof(float)*nx*nz*nt, cudaMemcpyDeviceToHost);
cudaMemcpy(m1_x, g_m1_x, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(m1_z, g_m1_z, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m2_c, g_aux_m2_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m3_c, g_aux_m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
cudaMemcpy(aux_m2m3_c, g_aux_m2m3_c, sizeof(float)*nx*nz, cudaMemcpyDeviceToHost);
fprintf(stderr,"Data Copy To CPU OK\n");
// Fix: pass the pointers themselves, not their addresses.
cudaFree(g_Vx0);
cudaFree(g_Vz0);
cudaFree(g_sigmaxx0);
cudaFree(g_sigmazz0);
cudaFree(g_sigmaxz0);
cudaFree(g_m1_x);
cudaFree(g_m1_z);
cudaFree(g_aux_m2_c);
cudaFree(g_aux_m3_c);
cudaFree(g_aux_m2m3_c);
fprintf(stderr,"GPU Mem Released OK\n");
}
|
9,652 | #include <stdio.h>
// Prints a greeting ("Hello world! From: thread N", in Portuguese) from
// every device thread; output is flushed at the next host synchronization.
__global__ void hello()
{
printf("Oi mundo! De: thread %d\n", threadIdx.x);
}
int main(void)
{
    // Launch 2 blocks of 2 threads; each device thread prints a greeting.
    const int blocks = 2;
    const int threadsPerBlock = 2;
    hello<<<blocks, threadsPerBlock>>>();
    // Wait for the kernel so the device printf output is flushed before exit.
    cudaDeviceSynchronize();
    return 0;
}
|
9,653 | #include <cuda_runtime.h>
#include <cuda.h>
#define TILE_SIZE 5900
#define NTHREADS 512
__global__
void tensor_transpose(int dim_input, int dim_output, int nblocks, int tile_size,
int *shape_input, int *shape_output,
float *shape_input_r, float *shape_output_r,
int *stride_input,
int *stride_output_local, int *stride_output_global,
double *input, double *output) {
__shared__ double tile[TILE_SIZE];
for (int block_idx = blockIdx.x; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
for (int i = 0; i < dim_input; i++) {
im = it / shape_input[i];
offset1 += stride_input[i] * (it - im * shape_input[i]);
it = im;
}
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
tile[i] = input[i + block_idx * tile_size];
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it / shape_output[j];
int tmp = it - im * shape_output[j];
offset2 += stride_output_global[j] * tmp;
local_offset += stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[local_offset];
}
__syncthreads();
}
}
|
9,654 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <sys/time.h>
#include <math.h>
// Monte-Carlo rejection test against the standard normal pdf, one sample
// point per thread: thread t consumes randomnums[2t] as x and
// randomnums[2t+1] as y, and records 1 iff y lies under the curve.
__global__ void kernel(int* count_d, float* randomnums)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    double x = randomnums[2 * tid];
    double y = randomnums[2 * tid + 1];
    // Standard normal density evaluated at x.
    double z = 1/sqrt(2*M_PI) * exp(-0.5*pow(x,2));
    count_d[tid] = (y <= z) ? 1 : 0;
}
// Abort the process if any CUDA error is pending.
// Note: cudaGetLastError() also clears the sticky error state.
void CUDAErrorCheck()
{
	cudaError_t error = cudaGetLastError();
	if (error != cudaSuccess)
	{
		printf("CUDA error : %s (%d)\n", cudaGetErrorString(error), error);
		// Bug fix: exit(0) reported success to the shell on failure.
		exit(EXIT_FAILURE);
	}
}
// Estimates the area under the standard normal pdf (printed as "area to left
// of 1"), repeated 3 times with different CuRand seeds.
int main(int argc,char* argv[])
{
	// Bug fix: argv[1] was dereferenced without validating argc.
	if (argc < 2)
	{
		fprintf(stderr, "usage: %s <niter>\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	int niter = atoi(argv[1]);
	int repetitions = 3;
	int j = 0;
	for (j=0; j<repetitions; j++)
	{
		float *randomnums;
		double phi;
		cudaMalloc((void**)&randomnums, (2*niter)*sizeof(float));
		// Use CuRand to generate an array of random numbers on the device
		int status;
		curandGenerator_t gen;
		status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A);
		status |= curandSetPseudoRandomGeneratorSeed(gen, 2138+j);
		// status |= curandSetPseudoRandomGeneratorSeed(gen, 4294967296ULL^time(NULL));
		status |= curandGenerateUniform(gen, randomnums, (2*niter));
		status |= curandDestroyGenerator(gen);
		if (status != CURAND_STATUS_SUCCESS)
		{
			printf("CuRand Failure\n");
			exit(EXIT_FAILURE);
		}
		// NOTE(review): the kernel runs blocks*threads = 100000 threads, each
		// reading two random numbers, but only 2*niter were generated — this
		// assumes niter == 100000; smaller values read out of bounds. Confirm.
		int threads = 1000;
		int blocks = 100;
		int* count_d;
		int *count = (int*)malloc(blocks*threads*sizeof(int));
		unsigned int reducedcount = 0;
		cudaMalloc((void**)&count_d, (blocks*threads)*sizeof(int));
		CUDAErrorCheck();
		struct timeval begin, end;
		gettimeofday(&begin, NULL);
		cudaEvent_t start, stop;
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start, 0);
		//one point per thread
		kernel <<<blocks, threads>>> (count_d, randomnums);
		cudaDeviceSynchronize();
		CUDAErrorCheck();
		cudaMemcpy(count, count_d, blocks*threads*sizeof(int), cudaMemcpyDeviceToHost);
		int i = 0;
		//reduce array into int
		for(i = 0; i<niter; i++)
			reducedcount += count[i];
		cudaEventRecord(stop, 0);
		// Bug fix: cudaEventElapsedTime requires the stop event to have
		// completed; synchronize on it before querying.
		cudaEventSynchronize(stop);
		float elapsedTime = 0;
		cudaEventElapsedTime(&elapsedTime, start, stop);
		gettimeofday(&end, NULL);
		double elapsed = (end.tv_sec - begin.tv_sec) + ((end.tv_usec - begin.tv_usec)/1000000.0);
		cudaFree(randomnums);
		cudaFree(count_d);
		free(count);
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		// Acceptance fraction gives the half-area; +0.5 adds the left half.
		phi = ((double)reducedcount/niter)*1.0 + 0.5;
		printf("CUDA - area to left of 1 on standard normal: %f\n", phi);
		//printf("runtime: %f\n", elapsedTime);
		printf("runtime: %f\n", elapsed);
		//printf("runtime: %f\n", seconds);
	}
	return 0;
}
|
9,655 | // ============================================ //
// Author: Federico Massa
//
// This is a CPU/GPU particle filter benchmark
// It doesn't actually do anything but replicate
// computations similar to a real particle filter,
// just to evaluate CPU/GPU performances.
// ============================================ //
#include <iostream>
#include <cstdlib>
#include <sstream>
#include <numeric>
#include <random>
#include <chrono>
#include <vector>  // bug fix: std::vector is used throughout but was never included
// Benchmark parameters, filled from the command line in main().
int nParticles;
int sizeX, sizeY;
int laserPoints;
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::milliseconds ms;
typedef std::chrono::duration<float> fsec;
// Dummy vectorized map: index of the closest black pixel per cell.
std::vector<int> closest_idx;
// Fills both scan arrays with uniform pseudo-random values in [0, 1].
// Bug fix: rand()/RAND_MAX was integer division and produced 0 for
// essentially every sample.
void generateLaserPoints(std::vector<float>& laserX, std::vector<float>& laserY)
{
    for (size_t i = 0; i < laserX.size(); i++) {
        laserX[i] = rand() / (float)RAND_MAX;
    }
    for (size_t i = 0; i < laserY.size(); i++) {
        laserY[i] = rand() / (float)RAND_MAX;
    }
}
// Produces a random odometry reading in [0, 1] per axis.
// Bug fix: rand()/RAND_MAX was integer division and always yielded 0.
void generateOdometry(float& vx, float& vy) {
    vx = rand() / (float)RAND_MAX;
    vy = rand() / (float)RAND_MAX;
}
// Dummy laser-to-pixel projection for the benchmark: every point maps to (0, 0).
__host__ __device__ void getPixel(const float& laserX, const float& laserY, int& laserPX, int& laserPY) {
    (void)laserX;
    (void)laserY;
    laserPX = laserPY = 0;
}
// Dummy particle-weight term: looks up the closest-obstacle cell for the
// pixel, but both displacement components are forced to zero, so the
// returned squared distance is always 0 (placeholder to mimic memory traffic).
__host__ __device__ float computeWeight(const int& laserPX, const int& laserPY, const int& sizeX, int* closest_idx) {
    const int cell = closest_idx[laserPX + sizeX * laserPY];
    const int dx = cell * 0;
    const int dy = cell * 0;
    return float(dx * dx + dy * dy);
}
// GPU counterpart of pf_iteration(): one thread per particle.
// NOTE(review): there is no `index < nParticles` guard and the launch rounds
// the grid up, so trailing threads write past the arrays unless nParticles is
// a multiple of the block size — consider passing a particle count.
__global__ void pf_iteration_dev(int* dev_closest_idx, float* dev_laserX, float* dev_laserY, float* dev_vx, float* dev_vy, float* dev_particles_x, float* dev_particles_y, float* dev_particles_theta, int* dev_laserPoints, float* dev_weights, int* dev_sizeX, int* dev_sizeY) {
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    // Predict: rotate odometry into the particle frame and advance the pose.
    // Bug fix: the second statement updated dev_particles_x twice with the same
    // formula; it now updates dev_particles_y with the rotated form, matching
    // the CPU reference implementation pf_iteration().
    dev_particles_x[index] += *dev_vx*cos(dev_particles_theta[index]) + *dev_vy*sin(dev_particles_theta[index]);
    dev_particles_y[index] += -*dev_vx*sin(dev_particles_theta[index]) + *dev_vy*cos(dev_particles_theta[index]);
    dev_particles_theta[index] += 1*3.14159/180.0;
    // Update: accumulate the (dummy) weight over every laser return.
    float weight = 0;
    for (int i = 0; i < *dev_laserPoints; i++) {
        float localLaserX = dev_laserX[i]*cos(dev_particles_theta[index]) + dev_laserY[i]*sin(dev_particles_theta[index]);
        float localLaserY = -dev_laserX[i]*sin(dev_particles_theta[index]) + dev_laserY[i]*cos(dev_particles_theta[index]);
        int localLaserPX, localLaserPY;
        // Transform laser point to pixel coordinates
        getPixel(localLaserX, localLaserY, localLaserPX, localLaserPY);
        weight += computeWeight(localLaserPX, localLaserPY, *dev_sizeX, dev_closest_idx);
    }
    dev_weights[index] = weight;
}
// CPU reference iteration of the dummy particle filter: predicts each
// particle's pose from the odometry, then scores it against every laser return.
void pf_iteration(std::vector<int>& closest_idx, const std::vector<float>& currentlaserX, const std::vector<float>& currentlaserY, const float& vx, const float& vy, std::vector<float>& init_particles_x, std::vector<float>& init_particles_y, std::vector<float>& init_particles_theta, std::vector<float>& weights) {
    for (int p = 0; p < nParticles; p++) {
        // Predict: rotate odometry into the particle frame, advance the pose,
        // then nudge the heading by one degree.
        const float c0 = cos(init_particles_theta[p]);
        const float s0 = sin(init_particles_theta[p]);
        init_particles_x[p] += vx * c0 + vy * s0;
        init_particles_y[p] += -vx * s0 + vy * c0;
        init_particles_theta[p] += 1*3.14159/180.0;
        // Update: score every laser point in the (post-prediction) frame.
        const float c = cos(init_particles_theta[p]);
        const float s = sin(init_particles_theta[p]);
        float w = 0;
        for (int k = 0; k < laserPoints; k++) {
            const float lx = currentlaserX[k] * c + currentlaserY[k] * s;
            const float ly = -currentlaserX[k] * s + currentlaserY[k] * c;
            int px, py;
            // Transform laser point to pixel coordinates
            getPixel(lx, ly, px, py);
            w += computeWeight(px, py, sizeX, closest_idx.data());
        }
        weights[p] = w;
    }
}
// Runs the dummy particle filter on the current device for 100 iterations and
// prints mean alloc/compute/retrieve times. Uses the file-level globals
// nParticles, sizeX, sizeY, laserPoints and closest_idx.
void gpu_test() {
    int* dev_closest_idx;
    int* dev_laserPoints;
    float *dev_laserX, *dev_laserY;
    float *dev_vx, *dev_vy;
    float* dev_particles_x;
    float* dev_particles_y;
    float* dev_particles_theta;
    float* dev_weights;
    int* dev_sizeX, *dev_sizeY;
    // Allocate distance map and copy it from host.
    cudaMalloc((void**)&dev_closest_idx, sizeX*sizeY*sizeof(int));
    // Bug fix: the copy size used sizeof(float) for an int buffer (same byte
    // count here, but the sizeof must match the element type).
    cudaMemcpy(dev_closest_idx, closest_idx.data(), sizeX*sizeY*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dev_laserX, laserPoints*sizeof(float));
    cudaMalloc((void**)&dev_laserY, laserPoints*sizeof(float));
    cudaMalloc((void**)&dev_vx, sizeof(float));
    cudaMalloc((void**)&dev_vy, sizeof(float));
    cudaMalloc((void**)&dev_particles_x, nParticles*sizeof(float));
    cudaMalloc((void**)&dev_particles_y, nParticles*sizeof(float));
    cudaMalloc((void**)&dev_particles_theta, nParticles*sizeof(float));
    cudaMalloc((void**)&dev_weights, nParticles*sizeof(float));
    cudaMalloc((void**)&dev_laserPoints, sizeof(int));
    cudaMalloc((void**)&dev_sizeX, sizeof(int));
    cudaMalloc((void**)&dev_sizeY, sizeof(int));
    cudaMemcpy(dev_sizeX, &sizeX, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_sizeY, &sizeY, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_laserPoints, &laserPoints, sizeof(int), cudaMemcpyHostToDevice);
    // Initialize particles at the origin.
    // Bug fix: reserve() does not create elements, so indexing the vectors and
    // copying nParticles floats from data() was undefined behavior. Size-
    // construct the vectors instead.
    std::vector<float> init_particles_x(nParticles, 0.0f);
    std::vector<float> init_particles_y(nParticles, 0.0f);
    std::vector<float> init_particles_theta(nParticles, 0.0f);
    cudaMemcpy(dev_particles_x, init_particles_x.data(), nParticles*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_particles_y, init_particles_y.data(), nParticles*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_particles_theta, init_particles_theta.data(), nParticles*sizeof(float), cudaMemcpyHostToDevice);
    // Current measurements
    std::vector<float> currentLaserX(laserPoints);
    std::vector<float> currentLaserY(laserPoints);
    float vx, vy;
    // Weights (output)
    std::vector<float> weights(nParticles);
    const int iterations = 100;
    std::vector<float> alloc_time, compute_time, retrieve_time;
    // Particle filter iteration
    for (int i = 0; i < iterations; i++) {
        auto t0 = Time::now();
        generateLaserPoints(currentLaserX, currentLaserY);
        generateOdometry(vx, vy);
        // Transfer current measurements to device
        cudaMemcpy(dev_laserX, currentLaserX.data(), laserPoints*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_laserY, currentLaserY.data(), laserPoints*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_vx, &vx, sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_vy, &vy, sizeof(float), cudaMemcpyHostToDevice);
        // 1 particle for each core
        const int THREADS = 128;
        const int BLOCKS = std::ceil(float(nParticles)/float(THREADS));
        auto t1 = Time::now();
        // Launch an iteration
        pf_iteration_dev<<<BLOCKS,THREADS>>>(dev_closest_idx, dev_laserX, dev_laserY, dev_vx, dev_vy, dev_particles_x, dev_particles_y, dev_particles_theta, dev_laserPoints, dev_weights, dev_sizeX, dev_sizeY);
        cudaDeviceSynchronize();
        auto t2 = Time::now();
        // Retrieve results
        cudaMemcpy(weights.data(), dev_weights, nParticles*sizeof(float), cudaMemcpyDeviceToHost);
        auto t3 = Time::now();
        fsec fs_alloc = t1 - t0;
        fsec fs_compute = t2 - t1;
        fsec fs_retrieve = t3 - t2;
        alloc_time.push_back(fs_alloc.count());
        compute_time.push_back(fs_compute.count());
        retrieve_time.push_back(fs_retrieve.count());
    }
    float alloc_mean = std::accumulate(alloc_time.begin(), alloc_time.end(), 0.0)/float(iterations);
    float compute_mean = std::accumulate(compute_time.begin(), compute_time.end(), 0.0)/float(iterations);
    float retrieve_mean = std::accumulate(retrieve_time.begin(), retrieve_time.end(), 0.0)/float(iterations);
    printf("GPU test finished, average time over %d iterations was %f: %f (alloc), %f (compute), %f (retrieve)\n", iterations, alloc_mean+compute_mean+retrieve_mean, alloc_mean, compute_mean, retrieve_mean);
    // Release memory on device.
    // Bug fix: the particle, weight and scalar-parameter buffers were leaked.
    cudaFree(dev_closest_idx);
    cudaFree(dev_laserX);
    cudaFree(dev_laserY);
    cudaFree(dev_vx);
    cudaFree(dev_vy);
    cudaFree(dev_particles_x);
    cudaFree(dev_particles_y);
    cudaFree(dev_particles_theta);
    cudaFree(dev_weights);
    cudaFree(dev_laserPoints);
    cudaFree(dev_sizeX);
    cudaFree(dev_sizeY);
}
// Runs the same dummy filter serially on the host for 100 iterations and
// prints the mean iteration time.
void cpu_test() {
    // Initialize particles at the origin.
    // Bug fix: reserve() does not create elements; indexing a reserved-but-
    // empty vector was undefined behavior. Size-construct instead.
    std::vector<float> init_particles_x(nParticles, 0.0f);
    std::vector<float> init_particles_y(nParticles, 0.0f);
    std::vector<float> init_particles_theta(nParticles, 0.0f);
    // Current measurements
    std::vector<float> currentLaserX(laserPoints);
    std::vector<float> currentLaserY(laserPoints);
    float vx, vy;
    // Weights (output)
    std::vector<float> weights(nParticles);
    const int iterations = 100;
    std::vector<float> tot_time;
    // Particle filter iteration
    for (int i = 0; i < iterations; i++) {
        auto t0 = Time::now();
        generateLaserPoints(currentLaserX, currentLaserY);
        generateOdometry(vx, vy);
        // Launch an iteration
        pf_iteration(closest_idx, currentLaserX, currentLaserY, vx, vy, init_particles_x, init_particles_y, init_particles_theta, weights);
        auto t3 = Time::now();
        fsec fs_tot = t3 - t0;
        tot_time.push_back(fs_tot.count());
    }
    float tot_mean = std::accumulate(tot_time.begin(), tot_time.end(), 0.0)/float(iterations);
    printf("###########################################################\n");
    printf("CPU test finished, average time over %d iterations was %f\n", iterations, tot_mean);
    printf("###########################################################\n\n");
}
// Parses <nParticles> <sizeX> <sizeY> <laserPoints>, runs the CPU benchmark,
// then the GPU benchmark on every CUDA device present.
int main(int argc, char** argv) {
    if (argc != 5) {
        std::cout << "Please specify an integer number of particles, map sizeX, map sizeY, laserPoints" << std::endl;
        exit(1);
    }
    std::stringstream ss;
    ss << argv[1];
    ss >> nParticles;
    ss.clear();
    ss << argv[2];
    ss >> sizeX;
    ss.clear();
    ss << argv[3];
    ss >> sizeY;
    ss.clear();
    ss << argv[4];
    ss >> laserPoints;
    ss.clear();
    printf("==================================================================\n");
    printf("Dummy particle filter generator with %d particles, a %dx%d map, %d laser scan points\n", nParticles, sizeX, sizeY, laserPoints);
    printf("==================================================================\n\n");
    // Allocate dummy map, vectorized, indicating the index of the closest black
    // pixel in the map with respect to the current index.
    // Bug fix: reserve() left size() == 0, so the fill loop below never executed
    // and the map was empty; resize() actually creates the elements.
    closest_idx.resize(sizeX*sizeY);
    for (size_t i = 0; i < closest_idx.size(); i++)
        closest_idx[i] = rand();
    cpu_test();
    int count;
    cudaGetDeviceCount(&count);
    for (int i = 0; i < count; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("###################################################\n");
        printf("Starting GPU benchmark on device %d with name: %s\n", i, prop.name);
        cudaSetDevice(i);
        gpu_test();
        printf("##################################################\n\n");
    }
}
9,656 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Prints an n x m row-major matrix to stdout, one row per line,
// one decimal place per element.
void print_array(float *A, int n, int m)
{
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < m; col++)
            printf("%.1f ", A[row*m + col]);
        printf("\n");
    }
}
// Swaps each adjacent column pair (k, k+1) of the input into the output.
// NOTE(review): the flat index `i*l+k` ignores the row pitch m and revisits
// the same cells for every l — it looks like l*m+k (or i*m+k without the
// l-loop) was intended; verify against the expected output. The k-loop also
// reads k+1, so m must be even.
__global__ void
process_kernel1(float *input, float *output, int n, int m)
{
// Code for i
int i= blockIdx.y * blockDim .y+ threadIdx .y;
int j= blockIdx.x * blockDim.x+ threadIdx.x;
// Guard against out-of-range threads; note a 1-D launch leaves i == 0 for
// most threads except via blockIdx.y (always 0 there).
if ((i<n) && (j<m)) {
for(int l=0; l<n; l++){
for (int k = 0; k < m; k+=2) {
output[i*l+k] = input[i*l+k+1];
output[i*l+k+1] = input[i*l+k];
}
}
}
}
// Reads a matrix from stdin, runs process_kernel1 on it, and prints the result.
// Input format: <test_cases> <m> <n> then n*m floats.
int main(void)
{
cudaError_t err = cudaSuccess;
// NOTE(review): test_cases is read but never used — only one matrix is processed.
int test_cases;
scanf("%d",&test_cases);
int m, n;
scanf("%d %d", &m, &n);
size_t size = m*n*sizeof(float);
float *h_input = (float *)malloc(size);
float *h_output = (float *)malloc(size);
if (h_input == NULL || h_output == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
for (int i = 0; i < n*m; ++i)
{
scanf("%f",&h_input[i]);
}
float *d_input = NULL;
err = cudaMalloc((void **)&d_input, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_input (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_output = NULL;
err = cudaMalloc((void **)&d_output, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector d_output (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector h_input from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//launching process_kernel1
// NOTE(review): this is a 1-D launch, but the kernel indexes rows with
// blockIdx.y/threadIdx.y, which are always 0 here — confirm the intended
// launch configuration (a 2-D grid/block seems expected).
int threadsPerBlock = 16;
int blocksPerGrid = ((m*n)+threadsPerBlock-1)/threadsPerBlock;
process_kernel1<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, n, m);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("Copy output data from the CUDA device to the host memory\n");
// Blocking copy; also synchronizes with the kernel above.
err = cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector d_output from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
// Verify that the result vectors are as expected
for (int i = 0; i < numElements; ++i)
{
if (fabs(sinf(h_input1[i]) + cosf(h_input2[i]) - h_output1[i]) > 1e-5)
{
fprintf(stderr, "Result verification for h_output1 failed at element %d! value \n", i, h_input1[i]);
exit(EXIT_FAILURE);
}
}
*/
print_array(h_output,n,m);
err = cudaFree(d_input);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_input (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_output);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector d_output (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_input);
free(h_output);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
9,657 | // Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/ball_query_gpu.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// For each query center, gathers up to nsample point indices whose squared
// distance lies in [min_radius^2, max_radius^2) — one thread per query point,
// blockIdx.y indexing the batch.
__global__ void ball_query_kernel(int b, int n, int m,
float min_radius,
float max_radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
// new_xyz: (B, M, 3)
// xyz: (B, N, 3)
// output:
// idx: (B, M, nsample)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
// Advance the base pointers to this (batch, query) slice.
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
// Compare squared distances to avoid sqrt in the inner loop.
float max_radius2 = max_radius * max_radius;
float min_radius2 = min_radius * min_radius;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
int cnt = 0;
for (int k = 0; k < n; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
// d2 == 0 admits a coincident point even when min_radius > 0.
if (d2 == 0 || (d2 >= min_radius2 && d2 < max_radius2)) {
if (cnt == 0) {
// Pre-fill the whole output slot with the first hit so unfilled
// entries are valid indices rather than uninitialized memory.
for (int l = 0; l < nsample; ++l) {
idx[l] = k;
}
}
idx[cnt] = k;
++cnt;
if (cnt >= nsample) break;
}
}
}
// Host wrapper: launches ball_query_kernel with one thread per query point
// (grid x covers the M queries, grid y covers the B batches) on `stream`,
// and aborts on launch failure.
// new_xyz: (B, M, 3), xyz: (B, N, 3), idx out: (B, M, nsample)
void ball_query_kernel_launcher(int b, int n, int m, float min_radius, float max_radius,
                                int nsample, const float *new_xyz, const float *xyz,
                                int *idx, cudaStream_t stream) {
    dim3 threads(THREADS_PER_BLOCK);
    dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);
    ball_query_kernel<<<blocks, threads, 0, stream>>>(b, n, m, min_radius, max_radius,
                                                      nsample, new_xyz, xyz, idx);
    // cudaDeviceSynchronize(); // for using printf in kernel function
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
|
9,658 | #include <iostream>
#define MAX_HASH 10
#define HASH_KEY(key) key%MAX_HASH
using namespace std;
// Singly-linked entry of one hash-bucket chain.
typedef struct _Node{
int id;           // key stored in this node
_Node* hashNext;  // next node in the same bucket, NULL at chain end
}Node;
// Bucket heads, zero-initialized (NULL == empty bucket).
Node* hashTable[MAX_HASH];
// Inserts `node` at the head of the bucket chain selected by `key`.
// (An empty bucket just takes the node as-is; the caller is expected to
// have set node->hashNext to NULL beforehand.)
void AddHashData(int key, Node* node){
    int bucket = HASH_KEY(key);
    if (hashTable[bucket] != NULL)
        node->hashNext = hashTable[bucket];  // chain existing entries behind the new head
    hashTable[bucket] = node;
}
void DelHashData(int id){
int hash_key = HASH_KEY(id);
if(hashTable[hash_key] == NULL)
return;
Node* delNode = NULL;
if(hashTable[hash_key]->id == id)
{
delNode = hashTable[hash_key];
hashTable[hash_key] = hashTable[hash_key]->hashNext;
}
else
{
Node* node = hashTable[hash_key];
Node* next = node->hashNext;
while(next)
{
if(next->id == id)
{
node->hashNext = next->hashNext;
delNode = next;
break;
}
node = next;
next = node->hashNext;
}
}
free(delNode);
}
// Returns the first node with matching id in its bucket chain, or NULL.
Node* FindHashData(int id){
    for (Node* node = hashTable[HASH_KEY(id)]; node != NULL; node = node->hashNext)
    {
        if (node->id == id)
            return node;
    }
    return NULL;
}
// Dumps every bucket: its label, then the ids in its chain.
// NOTE(review): an empty bucket prints its label without a newline, so
// consecutive empty-bucket labels run together — quirk preserved as-is.
void PrintAllHashData(){
    cout << "###Print All Hash Data###" << endl;
    for (int bucket = 0; bucket < MAX_HASH; bucket++)
    {
        cout << "idx" << bucket << " : ";
        Node* node = hashTable[bucket];
        if (node != NULL)
        {
            // All but the last id are space-separated; the last ends the line.
            while (node->hashNext != NULL)
            {
                cout << node->id << " ";
                node = node->hashNext;
            }
            cout << node->id << endl;
        }
    }
    cout << endl << endl;
}
// Exercise the hash table: insert 100 random-id nodes, then delete them in
// two halves, printing the table after each phase.
int main(){
// Every inserted id is recorded so the nodes can be deleted later.
int saveidx[101] = {0, };
for(int i = 0; i < 100; i++)
{
Node* node = (Node*) malloc(sizeof(Node));
node->id = rand() % 1000;  // no srand(): sequence is deterministic per run
node->hashNext = NULL;
AddHashData(node->id, node);
saveidx[i] = node->id;
}
PrintAllHashData();
// NOTE(review): duplicate ids are possible over 100 draws of rand()%1000;
// DelHashData removes one matching node per call, so each recorded id still
// frees exactly one node.
for(int i = 0; i < 50; i++)
DelHashData(saveidx[i]);
PrintAllHashData();
for(int i = 50; i < 100; i++)
DelHashData(saveidx[i]);
PrintAllHashData();
return 0;
}
|
9,659 | #include <cmath>
#include <string>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <chrono>
#define ll long long
#define ull unsigned long long
#define next(N) N*=25214903917;N+=11;N%=281474976710656
#define next3(N) N*=233752471717045; N+=11718085204285; N%=281474976710656//simulates 3 calls to next()
#define numBlocks 4096
#define numThreadsPerBlock 256
#define M_PI 3.1415926535897932
// Device predicate: does this chunk seed roll the target eye configuration?
__device__ bool getEyesFromChunkseed(ull chunkseed);
// Scans one batch of world seeds starting at *start.
__global__ void checkSeeds(ull* start, int* d_sinLUT, int* d_cosLUT, int* d_rad, int* d_srad);
// Fixed-point (scaled by 2048) trig lookup tables, filled by calculateLUTs().
int sinLUT[1024];
int cosLUT[1024];
void calculateLUTs(){ //Thanks to jacobsjo for adding a Look-Up Table
    // Builds fixed-point (x2048) sine/cosine tables with 1024 steps spanning
    // a full circle (step * pi / 512 radians).
    for (int step = 0; step < 1024; step++){
        double angle = (step * M_PI) / 512.0;
        sinLUT[step] = round(sin(angle) * 2048);
        cosLUT[step] = round(cos(angle) * 2048);
    }
}
// Seed-scan driver. Flags: -t <bool> print timing, -s <from> <to> seed range,
// -r <radius>, -sr <small radius>.
int main(int argc, char* argv[]) {
	using namespace std::chrono;
	cudaSetDevice(0);
	cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
	bool printTime = true;
	ull searchTo = 100000000000;
	ull searchFrom = 0;
	int rad = 64;
	int srad = 6;
	// Bug fix: parsing started at argv[0] and read argv[++i] without checking
	// that the value arguments exist — a trailing flag read past the argv
	// array. Start at 1 and bounds-check every flag that consumes values.
	for (int i = 1; i < argc; i++) {
		std::string arg = std::string(argv[i]);
		if (arg == "-t" && i + 1 < argc) {
			std::string nextArg = std::string(argv[++i]);
			printTime = (nextArg != "false" && nextArg != "0");
		}
		else if (arg == "-s" && i + 2 < argc){
			searchFrom = (ull)std::stoll (std::string(argv[++i]),NULL,0);
			searchTo = (ull)std::stoll (std::string(argv[++i]),NULL,0);
		}
		else if (arg == "-r" && i + 1 < argc) {
			rad = (int)std::stoi (std::string(argv[++i]),NULL,0);
		}
		else if (arg == "-sr" && i + 1 < argc) {
			srad = (int)std::stoi (std::string(argv[++i]),NULL,0);
		}
	}
	high_resolution_clock::time_point t1 = high_resolution_clock::now();
	int *d_sinLUT, *d_cosLUT;
	int *d_rad, *d_srad;
	ull *d_seed;
	int refresh = 0;
	calculateLUTs();
	cudaMalloc((void**)&d_sinLUT,sizeof(int)*1024);
	cudaMalloc((void**)&d_cosLUT,sizeof(int)*1024);
	cudaMalloc((void**)&d_seed,sizeof(ull));
	cudaMalloc((void**)&d_rad,sizeof(int));
	cudaMalloc((void**)&d_srad,sizeof(int));
	cudaMemcpy(d_sinLUT,&sinLUT,sizeof(int)*1024,cudaMemcpyHostToDevice);
	cudaMemcpy(d_cosLUT,&cosLUT,sizeof(int)*1024,cudaMemcpyHostToDevice);
	cudaMemcpy(d_rad,&rad,sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(d_srad,&srad,sizeof(int),cudaMemcpyHostToDevice);
	//3.5ms; 12us; 3us
	// One batch of numBlocks*numThreadsPerBlock seeds per launch; throttle
	// with an occasional sync so the launch queue cannot grow unboundedly.
	for(ull i = searchFrom; i<searchTo+numBlocks*numThreadsPerBlock;i+=numBlocks*numThreadsPerBlock){
		cudaMemcpy(d_seed,&i,sizeof(ull),cudaMemcpyHostToDevice);
		if(refresh++ % 100000 == 0) cudaDeviceSynchronize();
		checkSeeds<<<numBlocks,numThreadsPerBlock>>>(d_seed, d_sinLUT, d_cosLUT, d_rad, d_srad);
		cudaError_t err = cudaGetLastError();
		if (err != cudaSuccess)
			printf("Error: %s\n", cudaGetErrorString(err));
	}
	cudaFree(d_seed);
	cudaFree(d_sinLUT);
	cudaFree(d_cosLUT);
	cudaFree(d_rad);
	cudaFree(d_srad);
	if (printTime) {
		high_resolution_clock::time_point t2 = high_resolution_clock::now();
		duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
		double seconds = time_span.count();
		printf("%f seconds to calculate (%f seeds per second) with radius %d and small radius %d\n",seconds,(searchTo-searchFrom)/seconds,rad,srad);
	}
	return 0;
}
// One seed per thread. Each thread re-implements the 48-bit LCG from the
// `next` macro (seed = seed*25214903917 + 11 mod 2^48) to derive an angle,
// distance and per-chunk seeds, then tests a (2*srad+1)^2 chunk window around
// the predicted position with getEyesFromChunkseed(). Hits are printf'd as
// "<seed> <chunkX> <chunkZ>". The numeric constants are pre-collapsed LCG
// steps and fixed-point scales — do not "simplify" them.
__global__ void checkSeeds(ull* start, int* d_sinLUT, int* d_cosLUT, int* d_rad, int* d_srad) { // This function is full of Azelef math magic
ull seed, RNGseed1, RNGseed2, chunkseed;
ll var8, var10;
int baseX, baseZ, chunkX, chunkZ, angle;
double dist;
seed=*start+threadIdx.x+blockIdx.x*numThreadsPerBlock;
RNGseed2 = seed ^ 25214903917;
next3(RNGseed2);
// Early reject: distance beyond the search radius (rad is in some larger
// unit; the *4 scale presumably converts it — TODO confirm).
dist = 160+(RNGseed2/2199023255552);
if(dist > *d_rad*4) return;
RNGseed1 = seed ^ 25214903917;
next(RNGseed1);
var8 = (RNGseed1 >> 16) << 32;
angle = RNGseed1/274877906944;
next(RNGseed1);
var8 += (int) (RNGseed1 >> 16); //Don't ask me why there is a conversion to int here, I don't know either. //because its mojang's bad code -geo
var8 = var8 / 2 * 2 + 1; // force the multiplier odd
var10 = (RNGseed2 >> 16) << 32;
next(RNGseed2);
var10 += (int) (RNGseed2 >> 16);
var10 = var10 / 2 * 2 + 1; // force the multiplier odd
// Predicted chunk position from the fixed-point (x2048) trig tables;
// /8192 folds the 2048 scale with a /4 distance scale.
baseX = (*(d_cosLUT+angle) * dist) / 8192;
baseZ = (*(d_sinLUT+angle) * dist) / 8192;
for (chunkX = baseX - *d_srad; chunkX <= baseX + *d_srad; chunkX++) {
for (chunkZ = baseZ - *d_srad; chunkZ <= baseZ + *d_srad; chunkZ++) {
chunkseed = (var8 * chunkX + var10 * chunkZ) ^ (seed ^ 25214903917);
if (getEyesFromChunkseed(chunkseed)) {
printf("%llu %d %d\n",seed,chunkX,chunkZ);
goto end;
}
}
}
end:
angle = 0; // dummy statement: a label must precede a statement
}
// Rolls the 12 eye slots for this chunk seed and reports whether exactly one
// eye is missing. The seed is first advanced 761 LCG steps in one collapsed
// multiply/add (761 determined by CrafterDark); each slot then "fails" when
// the next 48-bit state is <= 253327479039590 (a ~90% threshold).
__device__ bool getEyesFromChunkseed(ull chunkseed) {
    // chunkseed = chunkseed ^ 25214903917; //This is the equivalent of starting a new Java RNG //WTF WAS JENDRIK THINKING
    chunkseed *= 124279299069389;
    chunkseed += 17284510777187;
    chunkseed %= 281474976710656;
    // Xero's branch-reduced scheme: bail out as soon as two eyes are missing,
    // but only through eye 9 — the checks for eyes 1, 10, 11 and 12 are
    // deliberately skipped, which is optimal for branch reduction and keeps
    // all the 11-eye seeds instead of discarding a sixth of them.
    int failcount = (chunkseed <= 253327479039590); // eye 1, no early check
    for (int eye = 2; eye <= 12; eye++) {
        next(chunkseed);
        failcount += (chunkseed <= 253327479039590);
        if (eye <= 9 && failcount == 2)
            return false;
    }
    return failcount == 1; // we want exactly 1 eye of ender to be missing
}
|
9,660 | // Ex. 4
// =====
// Compile without the flag -arch sm_20
//
// The result is: it works as before (with the flag), on my platform.
#include <stdio.h>
// Prints one greeting per device thread via device-side printf.
__global__
void helloFromGPU() {
printf("Hello World from GPU!\n");
}
int main(int argc, char *argv[]) {
// Hello from CPU
printf("Hello World from CPU!\n");
// 1 block of 10 threads: expect ten GPU greetings.
helloFromGPU<<<1, 10>>>();
// Tearing down the context also flushes pending device printf output
// before the process exits.
cudaDeviceReset();
return 0;
}
|
9,661 | #include<iostream>
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 2048
using namespace std;
// In-place tree reduction (maximum) over 2*blockDim.x ints in global memory;
// the result ends up in a[0]. Launched as Max<<<1, N/2>>>.
__global__ void Max(int *a)
{
	int tid = threadIdx.x;
	int step_size = 1;
	int num_of_threads = blockDim.x;
	while(num_of_threads>0){
		if(tid<num_of_threads){
			int first = tid*step_size*2;
			int second = first+step_size;
			if(a[second]>a[first])
				a[first]=a[second];
		}
		// Bug fix: without a barrier, warps in different rounds race on a[].
		// Safe here because every thread executes the same number of rounds.
		__syncthreads();
		step_size<<=1;
		num_of_threads>>=1;
	}
}
// In-place tree reduction (minimum) over 2*blockDim.x ints in global memory;
// the result ends up in a[0]. Launched as Min<<<1, N/2>>>.
__global__ void Min(int *a)
{
	int tid = threadIdx.x;
	int step_size = 1;
	int num_of_threads = blockDim.x;
	while(num_of_threads>0){
		if(tid<num_of_threads){
			int first = tid*step_size*2;
			int second = first+step_size;
			if(a[second]<a[first])
				a[first]=a[second];
		}
		// Bug fix: without a barrier, warps in different rounds race on a[].
		__syncthreads();
		step_size<<=1;
		num_of_threads>>=1;
	}
}
// In-place tree reduction (sum) over 2*blockDim.x ints in global memory;
// the result ends up in a[0]. Launched as sum<<<1, N/2>>>.
__global__ void sum(int *a)
{
	int tid = threadIdx.x;
	int step_size = 1;
	int num_of_threads = blockDim.x;
	while(num_of_threads>0){
		if(tid<num_of_threads){
			int first = tid*step_size*2;
			int second = first+step_size;
			a[first]+=a[second];
		}
		// Bug fix: without a barrier, warps in different rounds race on a[].
		__syncthreads();
		step_size<<=1;
		num_of_threads>>=1;
	}
}
// Replaces each element with its squared deviation from the mean
// (one thread per element).
__global__ void mean_diff(float *a,float mean)
{
	const float d = a[threadIdx.x] - mean;
	a[threadIdx.x] = d * d;
}
// Reduces the squared deviations in a[0..2*blockDim.x) to their sum, then
// divides by n so a[0] holds the variance. Launched as _std<<<1, N/2>>>.
__global__ void _std(float *a,int n)
{
	int tid = threadIdx.x;
	int step_size = 1;
	int num_of_threads = blockDim.x;
	while(num_of_threads>0){
		if(tid<num_of_threads){
			int first = tid*step_size*2;
			int second = first+step_size;
			a[first]+=a[second];
		}
		// Bug fix: rounds raced on a[] without a barrier.
		__syncthreads();
		step_size<<=1;
		num_of_threads>>=1;
	}
	// Bug fix: was `a[0]/=a[0]/n;`, which always produced n rather than the
	// variance, and was executed (racily) by every thread instead of one.
	if (tid == 0)
		a[0] /= n;
}
// Computes max, min, sum, mean, variance and std-dev of N random ints using
// single-block GPU reductions.
int main()
{
// Host copies: ints for max/min/sum, floats for the variance pass.
int *a = (int *)malloc(N*sizeof(int));
float *f = (float *)malloc(N*sizeof(float));
//srand(time(0));
for(int i=0;i<N;i++){
a[i]=rand()%10;  // values in [0, 9]
f[i]=float(a[i]);
}
int *a_cuda;
float *f_cuda;
int answer;
cudaMalloc((void **)&a_cuda,N*sizeof(int));
//MAX
// Each reduction kernel is destructive, so the input is re-uploaded before
// every call; the scalar result is read back from element 0.
cudaMemcpy(a_cuda,a,N*sizeof(int),cudaMemcpyHostToDevice);
Max<<<1,N/2>>>(a_cuda);
cudaMemcpy(&answer,a_cuda,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"Max : "<<answer<<endl;
//MIN
cudaMemcpy(a_cuda,a,N*sizeof(int),cudaMemcpyHostToDevice);
Min<<<1,N/2>>>(a_cuda);
cudaMemcpy(&answer,a_cuda,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"Min : "<<answer<<endl;
//SUM
cudaMemcpy(a_cuda,a,N*sizeof(int),cudaMemcpyHostToDevice);
sum<<<1,N/2>>>(a_cuda);
cudaMemcpy(&answer,a_cuda,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"Sum : "<<answer<<endl;
//MEAN
float mean = float(answer)/N;
cout<<"Mean : "<<mean<<endl;
cudaFree(a_cuda);
cudaMalloc((void **)&f_cuda,N*sizeof(float));
float result;
//STD
// mean_diff squares each deviation in place; _std then reduces them,
// intended to leave the variance in element 0.
cudaMemcpy(f_cuda,f,N*sizeof(float),cudaMemcpyHostToDevice);
mean_diff<<<1,N>>>(f_cuda,mean);
_std<<<1,N/2>>>(f_cuda,N);
cudaMemcpy(&result,f_cuda,sizeof(float),cudaMemcpyDeviceToHost);
cout<<"Variance : "<<result<<endl;
cout<<"Std. Dev. : "<<sqrt(result)<<endl;
cudaFree(f_cuda);
cout<<endl;
return 0;
}
9,662 | #include <stdio.h>
// Each launched thread atomically adds 1.0f to *total, so after the launch
// *total should equal the number of threads.
__global__ void check_atomic(float* total)
{
atomicAdd(total, 1.0f);
}
int main()
{
	// Shared accumulator in managed memory, visible to host and device.
	float* accumulator;
	cudaMallocManaged(&accumulator, sizeof(float));
	*accumulator = 0.0;
	// 32 threads each atomically add 1.0f; sync before reading on the host.
	check_atomic<<<1, 32>>>(accumulator);
	cudaDeviceSynchronize();
	printf("computed %.1lf, while true is %.1f\n", *accumulator, 32.0);
	cudaFree(accumulator);
	return 0;
}
|
9,663 | /*
* This program uses the device CURAND API to calculate what
* proportion of pseudo-random ints have low bit set.
* It then generates uniform results to calculate how many
* are greater than .5.
* It then generates normal results to calculate how many
* are within one standard deviation of the mean.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
// Initializes one curandState per thread: same seed, distinct sequence
// number, no offset — so the per-thread streams are independent.
__global__ void setup_kernel(curandState *state)
{
    // Generalized from a hard-coded 64 (identical under the <<<64, 64>>>
    // launch, but now correct for any block size).
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(1234, id, 0, &state[id]);
    //curand_init(id, 0, 0, &state[id]);
}
// Draws n standard-normal variates per thread, keeping only the last one in
// result[id]. The RNG state round-trips through a register copy for speed and
// persists across launches.
__global__ void generate_normal_kernel(curandState *state,
                                int n,
                                float *result)
{
    // Generalized from a hard-coded 64 (identical under the <<<64, 64>>> launch).
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    float x = 0.0f;  // bug fix: was uninitialized (and stored) when n <= 0
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    /* Generate pseudo-random normals */
    for(int i = 0; i < n; i++) {
        x = curand_normal(&localState);
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] = x;
}
// Sets up 64x64 per-thread RNG states, runs 50 chained normal-generation
// launches, and prints the 4096 final draws.
int main(int argc, char *argv[])
{
int i;
unsigned int total;
curandState *devStates;
//unsigned int *devResults, *hostResults;
float *devResults, *hostResults;
// NOTE(review): useMRG and usePHILOX are declared but never used in this
// trimmed-down sample.
bool useMRG = 0;
bool usePHILOX = 0;
int sampleCount = 10000;
bool doubleSupported = 0;
int device;
struct cudaDeviceProp properties;
/* check for double precision support */
// NOTE(review): doubleSupported is computed but never consulted afterwards.
CUDA_CALL(cudaGetDevice(&device));
CUDA_CALL(cudaGetDeviceProperties(&properties,device));
if ( properties.major >= 2 || (properties.major == 1 && properties.minor >= 3) ) {
doubleSupported = 1;
}
/* Allocate space for results on host */
hostResults = (float *)calloc(64 * 64, sizeof(float));
/* Allocate space for results on device */
CUDA_CALL(cudaMalloc((void **)&devResults, 64 * 64 *
sizeof(float)));
/* Set results to 0 */
CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Allocate space for prng states on device */
CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 *
sizeof(curandState)));
/* Setup prng states */
setup_kernel<<<64, 64>>>(devStates);
/* Set results to 0 */
CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 *
sizeof(float)));
/* Generate and use normal pseudo-random */
// States persist in device memory, so the 50 launches continue one long
// random stream per thread.
for(i = 0; i < 50; i++) {
generate_normal_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
}
/* Copy device memory to host */
CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 *
sizeof(float), cudaMemcpyDeviceToHost));
/* Show result */
// NOTE(review): total is zeroed but never accumulated — the tallying
// described in the file header comment was removed, leaving print-only output.
total = 0;
for(i = 0; i < 64 * 64; i++) {
printf("%lf\n",hostResults[i]);
}
/* Cleanup */
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(devResults));
free(hostResults);
return EXIT_SUCCESS;
}
|
9,664 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
// Accumulate the first element of b into the first element of a.
// Intended for a single-thread launch (<<<1, 1>>>).
__global__ void AddIntsCUDA(int* a, int* b){
    *a += *b;
}
// Adds two ints on the GPU and prints the result. Fixed: device allocations
// are now error-checked and freed (the original leaked both and checked nothing).
int main(){
    int a = 5;
    int b = 9;
    int *d_a = NULL;
    int *d_b = NULL;
    if (cudaMalloc(&d_a, sizeof(int)) != cudaSuccess ||
        cudaMalloc(&d_b, sizeof(int)) != cudaSuccess) {
        cout << "device allocation failed" << endl;
        cudaFree(d_a);  // cudaFree(NULL) is a safe no-op
        cudaFree(d_b);
        return 1;
    }
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    AddIntsCUDA <<<1, 1 >>>(d_a, d_b);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "the answer is: " << a << endl;
    // Release device memory (was leaked in the original).
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
//Need Error Checking i.e. DeAllocation in case of failure. |
9,665 | /* Program : To find the matrix multiplication of rectangular matrices using tiling
* Author : Anant Shah
* Date : 11-9-2018
* Roll Number : EE16B105
**/
#include<stdio.h>
#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
#define ROWS_M 8192
#define COLS_M 16384
#define ROWS_N 16384
#define COLS_N 32768
#define NUM_THREADS_X 32
#define NUM_THREADS_Y 32
#define TILE_WIDTH_X 32
#define TILE_WIDTH_Y 32
/* Abort with file/line context whenever a CUDA runtime call did not succeed. */
void error_handler(cudaError_t error_msg,int line){
    if(error_msg==cudaSuccess) return;
    printf("%s in %s at %d",cudaGetErrorString(error_msg),__FILE__,line);
    exit(EXIT_FAILURE);
}
/* Populate a row-major numRows x numCols matrix with the deterministic
 * pattern value(r, c) = r*2.1f + c*3.2f (computed in float, stored as double). */
void fill_matrix(double *mat,unsigned numRows,unsigned numCols){
    for(unsigned r=0;r<numRows;r++){
        double *row = mat + (size_t)r*numCols;
        for(unsigned c=0;c<numCols;c++){
            row[c] = r*2.1f + c*3.2f;
        }
    }
}
/* Append the numRows x numCols matrix to "assignment2_out", one row per line.
 * Fixed: fopen can fail (permissions, disk full) and fprintf(NULL, ...) is
 * undefined behavior, so the handle is now checked. */
void print_matrix_to_file(double *mat,unsigned numRows,unsigned numCols){
    const char *fname = "assignment2_out";
    FILE *f = fopen(fname,"a");
    if(f==NULL){
        printf("Error : could not open output file\n");
        return;
    }
    for(unsigned i=0;i<numRows;i++){
        for(unsigned j=0;j<numCols;j++){
            fprintf(f,"%4.4f ", mat[i*numCols+j]);
        }
        fprintf(f,"\n");
    }
    fclose(f);
}
/* Tiled multiplication P = M * N for row-major rectangular matrices.
 * M is numRows_M x width, N is width x numCols_N, P is numRows_M x numCols_N.
 * blockDim must equal (TILE_WIDTH_X, TILE_WIDTH_Y).
 * Fixed: out-of-range tile loads and the final store are now guarded, so
 * dimensions need not be multiples of the tile size (the original read and
 * wrote out of bounds in that case). Edge tiles are zero-padded, which leaves
 * the dot product unchanged. */
__global__ void matrixMul(double *M,double *N,double *P,int numRows_M,int numCols_N,int width){
    int tx = threadIdx.x; /* Thread-ID in the x-direction */
    int ty = threadIdx.y; /* Thread-ID in the y-direction */
    __shared__ double Ms[TILE_WIDTH_Y][TILE_WIDTH_X]; /* tile of M */
    __shared__ double Ns[TILE_WIDTH_X][TILE_WIDTH_Y]; /* tile of N */
    int row = blockIdx.y*blockDim.y + ty; /* row in the output matrix */
    int col = blockIdx.x*blockDim.x + tx; /* column in the output matrix */
    double pSum = 0.0;
    for(int m=0;m<(width+NUM_THREADS_X-1)/NUM_THREADS_X;m++){
        int mCol = m*TILE_WIDTH_X + tx; /* column of M this thread loads */
        int nRow = m*TILE_WIDTH_Y + ty; /* row of N this thread loads */
        /* Load phase: cooperative, zero-padded outside the matrices */
        Ms[ty][tx] = (row < numRows_M && mCol < width) ? M[row*width + mCol] : 0.0;
        Ns[ty][tx] = (nRow < width && col < numCols_N) ? N[nRow*numCols_N + col] : 0.0;
        __syncthreads();
        for(int i=0;i<TILE_WIDTH_X;i++){
            pSum += Ms[ty][i]*Ns[i][tx];
        }
        __syncthreads();
    }
    if(row < numRows_M && col < numCols_N)
        P[row*numCols_N+col] = pSum;
}
/* Driver: fills M and N, multiplies them on the GPU with the tiled kernel,
 * times the kernel with CUDA events, and appends the product to a file.
 * Fixed: kernel launch errors are now surfaced via cudaGetLastError, and the
 * timing events are destroyed (they leaked before). */
int main(int argc,char **argv){
    if(argc!=1){
        printf("error : Invalid number of arguments\n");
        exit(EXIT_FAILURE);
    }
    if(COLS_M!=ROWS_N){
        printf("Error : Invalid matrix dimensions");
        exit(EXIT_FAILURE);
    }
    /************************************* Variable Initialization **************************************/
    double *h_M; /* matrix M on the host */
    double *d_M; /* matrix M on the device */
    size_t size_M; /* bytes in M */
    double *h_N; /* matrix N on the host */
    double *d_N; /* matrix N on the device */
    size_t size_N; /* bytes in N */
    double *h_P; /* product M*N on the host */
    double *d_P; /* product M*N on the device */
    size_t size_P; /* bytes in P */
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    size_M = sizeof(double)*ROWS_M*COLS_M;
    size_N = sizeof(double)*ROWS_N*COLS_N;
    size_P = sizeof(double)*ROWS_M*COLS_N;
    /************************************** Memory Allocation on the Host ********************************/
    h_M = (double *)malloc(size_M);
    h_N = (double *)malloc(size_N);
    h_P = (double *)malloc(size_P);
    /************************************** Initialize the matrices ***************************************/
    fill_matrix(h_M,ROWS_M,COLS_M);
    fill_matrix(h_N,ROWS_N,COLS_N);
    /************************************** Allocate memory on the device *********************************/
    ERROR_HANDLER(cudaMalloc((void **)&d_M,size_M),__LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_N,size_N),__LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_P,size_P),__LINE__);
    /************************************** Copy Matrices to the device ***********************************/
    ERROR_HANDLER(cudaMemcpy(d_M,h_M,size_M,cudaMemcpyHostToDevice),__LINE__);
    ERROR_HANDLER(cudaMemcpy(d_N,h_N,size_N,cudaMemcpyHostToDevice),__LINE__);
    /************************************** Kernel invocation *********************************************/
    dim3 threads(NUM_THREADS_X,NUM_THREADS_Y); /* 2-D layout of threads in a block */
    dim3 blocks((COLS_N+NUM_THREADS_X-1)/NUM_THREADS_X,(ROWS_M+NUM_THREADS_Y-1)/NUM_THREADS_Y); /* 2-D grid */
    cudaEventRecord(start);
    matrixMul<<<blocks,threads>>>(d_M,d_N,d_P,ROWS_M,COLS_N,COLS_M); /* The last parameter could have been <ROWS_N> */
    ERROR_HANDLER(cudaGetLastError(),__LINE__); /* catch bad launch configurations */
    cudaEventRecord(stop);
    ERROR_HANDLER(cudaMemcpy(h_P,d_P,size_P,cudaMemcpyDeviceToHost),__LINE__);
    cudaEventSynchronize(stop);
    float run_time = 0.0;
    cudaEventElapsedTime(&run_time,start,stop);
    printf("Run-Time(seconds) : %.4f",run_time/1000);
    print_matrix_to_file(h_P,ROWS_M,COLS_N);
    /********************************** Free Allocated Memory ********************************************/
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    free(h_M);
    free(h_N);
    free(h_P);
}
|
9,666 | #include "includes.h"
// Adds a cyclically repeated bias vector (length nBias) to y (length n),
// one element per thread; excess threads exit early.
__global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    y[idx] += bias[idx % nBias];
}
9,667 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
// Per-element forward loss: 0.5*delta^2, except when do_clip is set and
// |delta| > 1, in which case the element contributes |delta| instead.
__global__ void lst_sq1_fwd_f32_kernel(
    uint32_t batch_sz,
    const float *x,
    const float *target,
    float *loss,
    uint32_t do_clip)
{
  uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx >= batch_sz) {
    return;
  }
  float delta = x[idx] - target[idx];
  if (do_clip && fabs(delta) > 1.0f) {
    loss[idx] = fabs(delta);
  } else {
    loss[idx] = 0.5f * delta * delta;
  }
}
// Host wrapper: one thread per element, 1024-thread blocks, rounded up.
extern "C" void arraydiff_cuda_kernel_lst_sq1_fwd_f32(
    size_t batch_sz,
    const float *x,
    const float *target,
    float *loss,
    uint32_t do_clip,
    cudaStream_t stream)
{
  size_t grid = (batch_sz + 1024 - 1) / 1024;
  lst_sq1_fwd_f32_kernel<<<grid, 1024, 0, stream>>>(
      batch_sz, x, target, loss, do_clip);
}
// Sums the per-element losses of each block_dim-sized segment into loss[block].
// Launch shape: <<<num_blocks, 1024>>>; requires block_dim <= 1024.
__global__ void lst_sq_block_fwd_f32_kernel(
    uint32_t block_dim,
    uint32_t num_blocks,
    const float *x,
    const float *target,
    float *loss,
    uint32_t do_clip)
{
  __shared__ float cache[1024];
  uint32_t tid = threadIdx.x;
  uint32_t block = blockIdx.x;
  uint32_t idx = tid + block_dim * block;
  if (tid < block_dim && block < num_blocks) {
    float delta = x[idx] - target[idx];
    // Clipped (Huber-style) element loss when do_clip is set.
    if (do_clip && fabs(delta) > 1.0f) {
      cache[tid] = fabs(delta);
    } else {
      cache[tid] = 0.5f * delta * delta;
    }
  } else {
    // BUG FIX: inactive lanes must contribute the additive identity 0.0f.
    // The original wrote -CUDART_INF_F (a max-reduction pad), which drives
    // the *sum* reduction to -inf whenever block_dim < 1024.
    cache[tid] = 0.0f;
  }
  __syncthreads();
  threadblock1024_reduce_sum_f32(cache);
  if (tid < block_dim && block < num_blocks) {
    loss[block] = cache[0];
  }
}
// Host wrapper: one 1024-thread CUDA block per loss block; the kernel
// itself masks off threads with tid >= block_dim.
extern "C" void arraydiff_cuda_kernel_lst_sq_block_fwd_f32(
    size_t block_dim,
    size_t num_blocks,
    const float *x,
    const float *target,
    float *loss,
    uint32_t do_clip,
    cudaStream_t stream)
{
  dim3 grid((unsigned int)num_blocks);
  dim3 threads(1024u);
  lst_sq_block_fwd_f32_kernel<<<grid, threads, 0, stream>>>(
      block_dim, num_blocks, x, target, loss, do_clip);
}
// Backward pass: dx[idx] += df[batch] * delta, with delta clamped to
// [-1, 1] when do_clip is set (matching the clipped forward loss).
__global__ void lst_sq_bwd_f32_kernel(
    uint32_t dim,
    uint32_t batch_sz,
    const float *x,
    const float *target,
    const float *df,
    float *dx,
    uint32_t do_clip)
{
  uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t batch_idx = idx / dim;
  if (batch_idx >= batch_sz) {
    return;
  }
  float delta = x[idx] - target[idx];
  if (do_clip) {
    delta = max(-1.0f, min(delta, 1.0f));
  }
  dx[idx] += df[batch_idx] * delta;
}
// Host wrapper: one thread per (element, batch) pair, 1024-thread blocks.
extern "C" void arraydiff_cuda_kernel_lst_sq_bwd_f32(
    size_t dim,
    size_t batch_sz,
    const float *x,
    const float *target,
    const float *df,
    float *dx,
    uint32_t do_clip,
    cudaStream_t stream)
{
  uint32_t total = (uint32_t)(dim * batch_sz);
  dim3 grid((total + 1024 - 1) / 1024);
  lst_sq_bwd_f32_kernel<<<grid, 1024, 0, stream>>>(
      dim, batch_sz, x, target, df, dx, do_clip);
}
|
9,668 | // This example demonstrates the use of shared per-block variables to
// implement an optimized adjacent difference algorithm. In this example,
// a per-block __shared__ array acts as a "bandwidth multiplier" by eliminating
// redundant loads issued by neighboring threads.
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
// compute the number of lines of code each implementation requires
const unsigned int simple_implementation_begin = __LINE__;
// a simple version of adjacent_difference which issues redundant loads from off-chip global memory
__global__ void adjacent_difference_simple(int *result, int *input)
{
// compute this thread's global index (one thread per element of input/result)
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i > 0)
{
// each thread loads two elements from global memory, so input[i] is fetched twice: here and as thread i+1's left neighbor
int x_i = input[i];
int x_i_minus_one = input[i-1];
// compute the difference using values stored in registers; result[0] is intentionally left unwritten
result[i] = x_i - x_i_minus_one;
}
}
const unsigned int simple_implementation_size = __LINE__ - simple_implementation_begin;
const unsigned int optimized_implementation_begin = __LINE__;
// an optimized version of adjacent_difference which eliminates redundant loads
__global__ void adjacent_difference(int *result, int *input)
{
// a __shared__ array with one element per thread (block_size * sizeof(int) bytes,
// passed as the 3rd launch parameter inside the triple chevrons)
extern __shared__ int s_data[];
// each thread reads exactly one element of input into s_data
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
// since one array gets allocated per-block, we index the array using our
// per-block thread index, threadIdx
// the global array, input, is indexed as usual
s_data[threadIdx.x] = input[i];
// avoid race condition: ensure all loads to s_data complete before we try to read from it
__syncthreads();
if(threadIdx.x > 0)
{
// compute the difference directly from s_data — both operands come from
// fast, on-chip shared memory instead of redundant global loads
result[i] = s_data[threadIdx.x] - s_data[threadIdx.x - 1];
}
else if(i > 0)
{
// handle thread block boundary
// the first thread in a block needs data that was read by the
// last thread of the previous block into its shared array
// this thread can't access that array, so issue one redundant global load per block
result[i] = s_data[threadIdx.x] - input[i-1];
}
}
const unsigned int optimized_implementation_size = __LINE__ - optimized_implementation_begin;
// Benchmark harness: times the naive vs. shared-memory adjacent-difference
// kernels over 100 launches each and reports GB/s and GB/s per source line.
// NOTE(review): demo code — CUDA return codes are deliberately not checked.
int main(void)
{
// create a large workload so we can easily measure the
// performance difference of both implementations
const size_t block_size = 512;
const size_t num_blocks = (1<<24) / block_size;
const size_t n = num_blocks * block_size;
// generate random input on the host
std::vector<int> h_input(n);
std::generate(h_input.begin(), h_input.end(), rand);
// allocate storage for the device
int *d_input = 0, *d_result = 0;
cudaMalloc((void**)&d_input, sizeof(int) * n);
cudaMalloc((void**)&d_result, sizeof(int) * n);
// copy input to the device
cudaMemcpy(d_input, &h_input[0], sizeof(int) * n, cudaMemcpyHostToDevice);
// time the kernel launches using CUDA events
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
// to get accurate timings, launch a single "warm-up" kernel
// dynamically allocate the __shared__ array by passing its
// size in bytes to the 3rd parameter of the triple chevrons
// (the simple kernel ignores shared memory; passing it here is harmless)
adjacent_difference_simple<<<num_blocks,block_size,block_size*sizeof(int)>>>(d_result, d_input);
const size_t num_launches = 100;
// time many kernel launches and take the average time
float average_simple_time = 0;
for(int i = 0; i < num_launches; ++i)
{
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
adjacent_difference_simple<<<num_blocks,block_size,block_size*sizeof(int)>>>(d_result, d_input);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
// measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_simple_time += time;
}
average_simple_time /= num_launches;
// now time the optimized kernel
// again, launch a single "warm-up" kernel
adjacent_difference<<<num_blocks,block_size,block_size*sizeof(int)>>>(d_result, d_input);
// time many kernel launches and take the average time
float average_optimized_time = 0;
for(int i = 0; i < num_launches; ++i)
{
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
adjacent_difference<<<num_blocks,block_size,block_size*sizeof(int)>>>(d_result, d_input);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
// measure the time spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_optimized_time += time;
}
average_optimized_time /= num_launches;
// report the effective throughput of each kernel in GB/s
// the effective throughput is measured as size of input read + size of output written divided by time
float simple_throughput = static_cast<float>(2 * n * sizeof(int)) / (average_simple_time / 1000.0f) / 1000000000.0f;
float optimized_throughput = static_cast<float>(2 * n * sizeof(int)) / (average_optimized_time / 1000.0f) / 1000000000.0f;
// compute throughput per line of code to measure how productive we were
// (the *_implementation_size constants are measured with __LINE__ above)
float simple_throughput_per_sloc = simple_throughput / simple_implementation_size;
float optimized_throughput_per_sloc = optimized_throughput / optimized_implementation_size;
std::cout << "Work load size: " << n << std::endl;
std::cout << "Simple implementation SLOCs: " << simple_implementation_size << std::endl;
std::cout << "Optimized implementation SLOCs: " << optimized_implementation_size << std::endl << std::endl;
std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl;
std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GB/s" << std::endl;
std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl;
std::cout << std::endl;
std::cout << "Throughput of simple kernel per line of code: " << simple_throughput_per_sloc << " GB/s/sloc" << std::endl;
std::cout << "Throughput of optimized kernel per line of code: " << optimized_throughput_per_sloc << " GB/s/sloc" << std::endl;
std::cout << "Performance improvement per line of code: " << optimized_throughput_per_sloc / simple_throughput_per_sloc << "x" << std::endl;
// destroy the CUDA events
cudaEventDestroy(launch_begin);
cudaEventDestroy(launch_end);
// deallocate device memory
cudaFree(d_input);
cudaFree(d_result);
return 0;
}
|
extern "C"
// Inference-time dropout: scale each activation by the keep probability
// (no masking at runtime), one element per thread.
__global__ void dropoutRuntimeKernel (int numberEntries, float keepProbability, float* input, float* result)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numberEntries) return;
    result[index] = keepProbability * input[index];
}
9,670 | #include "my_cudamemset.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Grid-stride fill: each thread writes `val` to a strided subset of devPtr,
// so any grid size covers all `nwords` elements.
template<typename T>
__global__ void initKernel(T * devPtr, const T val, const size_t nwords)
{
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < nwords; i += step)
        devPtr[i] = val;
}
extern "C"
// Fill `size` floats at d_devPtr with `val`. A kernel is used because
// cudaMemset is byte-wise and cannot write arbitrary float patterns.
// Fixed: removed the dead `gridSize = 4` store and normalized the malformed
// `>> >` launch closer to `>>>`.
void my_cudamemset(float *(&d_devPtr), size_t size, const float val){
    const int blockSize = 768;
    const int gridSize = (int)((size + blockSize - 1) / blockSize); // ceil-div
    initKernel<float><<<gridSize, blockSize>>>(d_devPtr, val, size);
    cudaDeviceSynchronize();
}
9,671 | #include <cuda_runtime.h>
#include <stdio.h>
// Demonstrates warp divergence: odd and even lanes take different branches,
// so the warp serializes over both printf paths.
__global__ void divergence_test_ker()
{
    const bool is_even = (threadIdx.x % 2 == 0);
    if (is_even)
        printf("threadIdx.x %d : This is an even thread.\n", threadIdx.x);
    else
        printf("threadIdx.x %d : This is an odd thread.\n", threadIdx.x);
}
// Launch one warp of the divergence demo, wait for device printf to flush,
// then tear the device down.
__host__ int main()
{
    cudaSetDevice(0);
    divergence_test_ker<<<1, 32>>>();
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
9,672 | /**
* CSE5441: Lab 4 Problem 1
**/
#include<time.h>
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define THRESHOLD 1e-4
// Get the the epoch time in seconds. We don't care about the timezone because
// we will be subtracting the two time values to measure the time spend by the program.
// Epoch time in seconds with microsecond resolution; callers subtract two
// readings to time an interval, so the timezone argument is irrelevant.
double rtclock(void)
{
    struct timeval Tp;
    struct timezone Tzp;
    int stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0)
        printf("Error return from gettimeofday: %d", stat);
    return Tp.tv_sec + Tp.tv_usec*1.0e-6;
}
// Perform matrix transpose multiply on GPU device.
__global__ void MatrixTransposeMultiplyDevice(double *A, double *C, int *matrixSize) {
// Computes C += A^T * A, one output element per thread:
// C[tidx][tidy] += sum_k A[k][tidx] * A[k][tidy]  (row-major, n = *matrixSize).
// The size lives in device memory, so *matrixSize is dereferenced on every
// loop test. No bounds guard: assumes the grid exactly covers n x n threads
// (the host launches dim/16 x dim/16 blocks of 16x16) and that C was
// zero-initialized on the host before the copy-in.
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll 64
for(int k = 0; k < *matrixSize; k++)
C[tidx * (*matrixSize) + tidy] += A[k * (*matrixSize) + tidx] * A[k * (*matrixSize) + tidy];
}
// Perform matrix transpose multiply on CPU.
// CPU reference for C = A^T * A on a dim x dim row-major matrix:
// C[i][j] = sum_k A[k][i] * A[k][j].
void MatrixTransposeMultiplyHost(double *A, double *C, int dim) {
    for (int ci = 0; ci < dim; ci++) {
        for (int cj = 0; cj < dim; cj++) {
            double acc = 0.0;
            for (int k = 0; k < dim; k++)
                acc += A[k * dim + ci] * A[k * dim + cj];
            C[ci * dim + cj] = acc;
        }
    }
}
// Validate the results from the GPU computation with those from the CPU computation.
// Return 1 if the results match otherwise returns 0.
// Entry-wise comparison of two dim x dim matrices: returns 1 iff every
// absolute difference is within THRESHOLD (GPU and CPU floating-point
// results may legitimately differ by a tiny amount), else 0.
int MatrixTransposeMultiplyHostValidate(double *A, double *C, int dim)
{
    const int total = dim * dim;
    for (int idx = 0; idx < total; idx++) {
        double diff = C[idx] - A[idx];
        if (diff < 0) diff = -diff;
        if (diff > THRESHOLD) return 0;
    }
    return 1;
}
// Intialize the matrix for the problem with random values from 1.0 to 2.0.
void initMatrix(double *A, int dim) {
for (int i= 0; i< dim; i++)
for (int j = 0; j < dim; j++)
A[i* dim + j] = ((double)rand() / RAND_MAX) + 1.0;
}
/* Driver: computes A^T * A on both CPU and GPU, times each, and validates
 * the GPU result against the CPU reference.
 * Fixed: deprecated cudaThreadSynchronize() replaced with
 * cudaDeviceSynchronize(), and the stale "256 * 256" comment corrected. */
int main(void) {
    double *A, *C;
    // Square matrix dimension used for both CPU and GPU runs.
    int dim = 2048;
    double *d_A, *d_C;
    int *d_matrixSize;
    size_t memSize = dim * dim * sizeof(double);
    // Allocate memory for the matrices (C is zeroed: the GPU kernel accumulates).
    A = (double *) malloc(memSize);
    C = (double *) calloc(dim * dim, sizeof(double));
    // Load A.
    initMatrix(A, dim);
    // Define thread hierarchy: 16x16 blocks tiling the full dim x dim grid.
    int nblocks= dim/16;
    int tpb= 16;
    // Allocate device memory
    cudaMalloc( (void**) &d_A, memSize);
    cudaMalloc( (void**) &d_C, memSize);
    cudaMalloc( (void**) &d_matrixSize, sizeof(int));
    // Initialize device memory
    cudaMemcpy(d_A, A, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrixSize, &dim, sizeof(int), cudaMemcpyHostToDevice);
    // Measure the time for the computation
    double start_time, end_time;
    // Start time for matrix transpose multiply on CPU
    start_time = rtclock();
    MatrixTransposeMultiplyHost(A, C, dim);
    end_time = rtclock();
    // Print stats for the CPU
    printf("Time taken for matrix transpose multiply on CPU (sec) = %.5f, Performance (GFlops/sec) = %.5f\n", end_time - start_time, (2L * dim * dim * dim)/ (1e9 * (end_time - start_time)));
    // Launch kernel
    dim3 dimGrid(nblocks, nblocks);
    dim3 dimBlock(tpb, tpb);
    // Start time for matrix transpose multiply on GPU
    start_time = rtclock();
    MatrixTransposeMultiplyDevice<<< dimGrid, dimBlock>>>(d_A, d_C, d_matrixSize);
    // Wait for the kernel so the elapsed time covers the whole GPU run
    // (cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize).
    cudaDeviceSynchronize();
    end_time = rtclock();
    // Print stats for the GPU
    printf("Time taken for matrix transpose multiply on GPU (sec) = %.5f, Performance (GFlops/sec) = %.5f\n", end_time - start_time, (2L * dim * dim * dim)/ (1e9 * (end_time - start_time)));
    // Retrieve results (A is reused as the host buffer for the GPU product).
    cudaMemcpy(A, d_C, memSize, cudaMemcpyDeviceToHost);
    // Verify results between the CPU and GPU
    if(!MatrixTransposeMultiplyHostValidate(C, A, dim))
        fprintf(stderr, "Wrong results for matrix transpose multiply on GPU\n");
    // Free memory
    cudaFree(d_A);
    cudaFree(d_C);
    cudaFree(d_matrixSize);
    free(A);
    free(C);
}
|
9,673 |
#include <cuda.h>
// Element-wise in-place maximum: v1[i] = max(v1[i], v2[i]) for i < num_elements.
__global__ void volume_max(
    const int num_elements,
    float * v1,
    const float * v2
){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < num_elements && v2[idx] > v1[idx]) {
        v1[idx] = v2[idx];
    }
}
|
9,674 | /**
* @file HelloWorld.cu
*
* @author btran
*
* @date 2020-05-02
*
* Copyright (c) organization
*
*/
#include <iostream>
// Each thread reports its 3-D position within its block and the block's
// 3-D position within the grid via device printf.
__global__ void helloworld1()
{
    printf("Hello from thread (%d, %d, %d) in block (%d, %d, %d) \n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z);
}
// Launch one block of 10 threads and block until device printf output flushes.
int main(int argc, char *argv[])
{
    helloworld1<<<1, 10>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
9,675 | #include <stdio.h>
// Fill d_in with 1-based sequential values: d_in[i] = i + 1.
__global__ void init_numbers(int *d_in) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    d_in[i] = i + 1;
}
// Write 1 where the input element is even, 0 where it is odd.
__global__ void is_even_predicate(const int *d_in, int *d_result) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    d_result[i] = (d_in[i] % 2 == 0) ? 1 : 0;
}
// Hillis-Steele inclusive scan over an exclusive-shifted copy of d_in,
// yielding an exclusive prefix sum of d_in in d_out. Single-block only:
// indexing uses threadIdx.x alone, so blockDim.x must equal the element
// count and the dynamic shared allocation must hold blockDim.x ints.
__global__ void primitive_sum_scan(const int *d_in, int *d_out) {
extern __shared__ int sdata[];
int index = threadIdx.x;
// Make it exclusive
sdata[index] = (index > 0) ? d_in[index-1] : 0;
__syncthreads();
for (int offset = 1; offset < blockDim.x; offset*=2) {
// Read both operands for this round before any thread overwrites sdata...
int value = sdata[index];
int neighbour = (offset <= index) ? sdata[index - offset] : 0;
// ...then barrier so every read completes before the writes below start.
__syncthreads();
sdata[index] = value + neighbour;
__syncthreads();
}
d_out[index] = sdata[index];
}
// Compact step: copy each number whose predicate is set to the output slot
// given by its exclusive-scan position.
__global__ void scatter(const int *d_numbers, const int *d_predicates, const int *d_positions, int *d_out) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (!d_predicates[idx]) return;
    d_out[d_positions[idx]] = d_numbers[idx];
}
// Stream-compaction demo: generate 1..1024, keep the even ones, and print
// the first 512 compacted values. Fixed: d_result was never freed (leak).
int main(int argc, char const **argv) {
    int THREAD_COUNT = 1024;
    int BLOCK_COUNT = 1;
    int *d_numbers, *d_predicates, *d_scatter_positions, *d_result;
    cudaMalloc((void **) &d_numbers, sizeof(int) * BLOCK_COUNT * THREAD_COUNT);
    cudaMalloc((void **) &d_predicates, sizeof(int) * BLOCK_COUNT * THREAD_COUNT);
    cudaMalloc((void **) &d_scatter_positions, sizeof(int) * BLOCK_COUNT * THREAD_COUNT);
    cudaMalloc((void **) &d_result, sizeof(int) * BLOCK_COUNT * THREAD_COUNT);
    init_numbers<<<BLOCK_COUNT, THREAD_COUNT>>>(d_numbers);
    is_even_predicate<<<BLOCK_COUNT, THREAD_COUNT>>>(d_numbers, d_predicates);
    // Exclusive scan of the predicates gives each kept element its output slot.
    primitive_sum_scan<<<BLOCK_COUNT, THREAD_COUNT, BLOCK_COUNT * THREAD_COUNT * sizeof(int)>>>(d_predicates, d_scatter_positions);
    scatter<<<BLOCK_COUNT, THREAD_COUNT>>>(d_numbers, d_predicates, d_scatter_positions, d_result);
    int h_result[BLOCK_COUNT * THREAD_COUNT];
    // Blocking memcpy also synchronizes with the kernels above.
    cudaMemcpy(h_result, d_result, sizeof(int) * BLOCK_COUNT * THREAD_COUNT, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 512; i++) {
        printf("%i\n", h_result[i]);
    }
    cudaFree(d_numbers);
    cudaFree(d_predicates);
    cudaFree(d_scatter_positions);
    cudaFree(d_result); // was leaked in the original
    return 0;
}
|
9,676 | #include "includes.h"
// Time-step update over `size` elements, indexed by a flattened 2-D grid of
// 1-D blocks: displ += dt*veloc + (dt^2/2)*accel; veloc += (dt/2)*accel;
// accel is reset to zero for the next step's accumulation.
__global__ void update_disp_veloc_kernel(float * displ, float * veloc, float * accel, const int size, const float deltat, const float deltatsqover2, const float deltatover2){
    const int id = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if (id >= size) return;
    displ[id] += deltat * veloc[id] + deltatsqover2 * accel[id];
    veloc[id] += deltatover2 * accel[id];
    accel[id] = 0.0f;
}
9,677 | // includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <cuda_runtime.h>
#define N (1 << 22)
// Simple utility function to check for CUDA runtime errors
// Abort with a labelled diagnostic if any prior CUDA call or launch failed.
void checkCUDAError(const char *msg) {
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
}
// CPU reference: element-wise product of two length-N vectors
// (N is the file-level size macro).
void multiplyCPU(const float *h_v1, const float *h_v2, float *h_out) {
    for (int idx = 0; idx < N; ++idx)
        h_out[idx] = h_v1[idx] * h_v2[idx];
}
// CPU reference: product scaled by a sum of four square roots —
// deliberately compute-heavy to contrast with multiplyCPU.
void expensiveFunctionCPU(const float *h_v1, const float *h_v2, float *h_out) {
    for (int idx = 0; idx < N; ++idx) {
        const float a = h_v1[idx];
        const float b = h_v2[idx];
        h_out[idx] = (a * b) * (sqrt(a + b) + sqrt(a) + sqrt(b - a) + sqrt(b));
    }
}
// One thread per element; no bounds guard — assumes the launch covers the
// vector exactly (true in main: N = 1<<22 with 1024-thread blocks).
__global__ void multiplyGPU(const float *g_v1, const float *g_v2, float *g_out) {
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    g_out[i] = g_v1[i] * g_v2[i];
}
// GPU twin of expensiveFunctionCPU; same no-guard assumption as multiplyGPU.
__global__ void expensiveFunctionGPU(const float *g_v1, const float *g_v2, float *g_out) {
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float a = g_v1[i];
    const float b = g_v2[i];
    g_out[i] = (a * b) * (sqrt(a + b) + sqrt(a) + sqrt(b - a) + sqrt(b));
}
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
/* Benchmark: times CPU vs GPU versions of a cheap (multiply) and an
 * expensive (multi-sqrt) element-wise kernel, then validates GPU output.
 * Fixed: the four cudaEvents were created on every loop iteration and never
 * destroyed (resource leak); they are now destroyed at the end of each
 * iteration. */
int main( int argc, char** argv) {
    ///////////////////// LOTS OF INITIALIZATION CODE ////////////////////////
    // pointers to host vectors v1 and v2
    float *h_v1, *h_v2;
    // pointers to host output for multiply and expensive
    float *h_multiply_correct, *h_expensive_correct;
    // pointers to store results from gpu functions on the host
    float *h_multiply_out, *h_expensive_out;
    // pointer for device vectors v1 and v2
    float *d_v1, *d_v2;
    // pointers to device output
    float *d_multiply_out, *d_expensive_out;
    // Number of test iterations to use for timing
    int testIterations = 3;
    // allocate memory for pointers
    h_v1 = (float *)malloc(N * sizeof(float));
    h_v2 = (float *)malloc(N * sizeof(float));
    h_multiply_correct = (float *)malloc(N * sizeof(float));
    h_expensive_correct = (float *)malloc(N * sizeof(float));
    h_multiply_out = (float *)malloc(N * sizeof(float));
    h_expensive_out = (float *)malloc(N * sizeof(float));
    // allocate memory for device pointers
    cudaMalloc( (void **) &d_v1, N * sizeof(float));
    cudaMalloc( (void **) &d_v2, N * sizeof(float));
    cudaMalloc( (void **) &d_multiply_out, N *sizeof(float));
    cudaMalloc( (void **) &d_expensive_out, N *sizeof(float));
    // Initialize v1 and v2
    for (int i = 0; i < N; i++) {
        h_v1[i] = 1.0 * (i % 10000);
        h_v2[i] = 2 * h_v1[i];
    }
    float multiply_time = 0;
    float expensive_time = 0;
    float time = 0;
    // CPU functions over testIterations
    for (int i = 0; i < testIterations; i++) {
        // zero memory for outputs
        memset(h_multiply_correct, 0, N * sizeof(float));
        memset(h_expensive_correct, 0, N * sizeof(float));
        time = clock();
        multiplyCPU(h_v1, h_v2, h_multiply_correct);
        multiply_time += (clock()-time)/CLOCKS_PER_SEC*1000;
        time = clock();
        expensiveFunctionCPU(h_v1, h_v2, h_expensive_correct);
        expensive_time += (clock()-time)/CLOCKS_PER_SEC*1000;
    }
    printf("Multiply serial run time: %fms\n", multiply_time / testIterations);
    printf("Expensive serial run time: %fms\n", expensive_time / testIterations);
    // GPU functions over testIterations
    multiply_time = 0;
    expensive_time = 0;
    for (int i = 0; i < testIterations; i++) {
        // zero output memory
        memset(h_multiply_out, 0, N * sizeof(float));
        memset(h_expensive_out, 0, N * sizeof(float));
        cudaMemset(d_multiply_out, 0, N * sizeof(float));
        cudaMemset(d_expensive_out, 0, N * sizeof(float));
        // zero input memory
        cudaMemset(d_v1, 0, N * sizeof(float));
        cudaMemset(d_v2, 0, N * sizeof(float));
        // transfer data to GPU
        cudaMemcpy(d_v1, h_v1, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_v2, h_v2, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaEvent_t multiply_begin, multiply_end, expensive_begin, expensive_end;
        cudaEventCreate(&multiply_begin);
        cudaEventCreate(&multiply_end);
        cudaEventCreate(&expensive_begin);
        cudaEventCreate(&expensive_end);
        cudaEventRecord(multiply_begin, 0);
        const size_t block_size = 1024;
        size_t grid_size = N / block_size;
        if(N % block_size) ++grid_size;   // ceil-div for sizes not a multiple of the block
        multiplyGPU<<<grid_size, block_size>>>(d_v1, d_v2, d_multiply_out);
        cudaEventRecord(multiply_end, 0);
        cudaEventSynchronize(multiply_end);
        cudaEventElapsedTime(&time, multiply_begin, multiply_end);
        multiply_time += time;
        // transfer data from GPU
        cudaMemcpy(h_multiply_out, d_multiply_out, N * sizeof(float), cudaMemcpyDeviceToHost);
        // Check for any CUDA errors
        checkCUDAError("multiplyGPU");
        // zero input memory
        cudaMemset(d_v1, 0, N * sizeof(float));
        cudaMemset(d_v2, 0, N * sizeof(float));
        // transfer data to GPU
        cudaMemcpy(d_v1, h_v1, N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_v2, h_v2, N * sizeof(float), cudaMemcpyHostToDevice);
        const size_t block_size2 = 128;
        size_t grid_size2 = N / block_size2;
        if(N % block_size2) ++grid_size2;
        cudaEventRecord(expensive_begin, 0);
        expensiveFunctionGPU<<<grid_size2, block_size2>>>(d_v1, d_v2, d_expensive_out);
        cudaEventRecord(expensive_end, 0);
        cudaEventSynchronize(expensive_end);
        cudaEventElapsedTime(&time, expensive_begin, expensive_end);
        expensive_time += time;
        // transfer data from GPU
        cudaMemcpy(h_expensive_out, d_expensive_out, N * sizeof(float), cudaMemcpyDeviceToHost);
        // Check for any CUDA errors
        checkCUDAError("expensiveFunctionGPU");
        // FIX: release the per-iteration timing events (previously leaked)
        cudaEventDestroy(multiply_begin);
        cudaEventDestroy(multiply_end);
        cudaEventDestroy(expensive_begin);
        cudaEventDestroy(expensive_end);
    }
    printf("Multiply kernel run time: %fms\n", multiply_time/ testIterations);
    printf("Expensive kernel run time: %fms\n", expensive_time/ testIterations);
    /////////////////////////////// VALIDATION ///////////////////////////////
    // check if output from gpu kernels is correct (absolute / relative tolerance)
    for (int i = 0; i < N; i++) {
        if (!(abs(h_multiply_out[i] - h_multiply_correct[i]) <= 0.0001)) {
            printf("Test failed (h_multiply_out[%d]:%f != h_multiply_correct[%d]:%f)!\n",
                i, h_multiply_out[i], i, h_multiply_correct[i]);
            exit(1);
        }
        if (!(abs(h_expensive_out[i] - h_expensive_correct[i]) <= 0.00001 * abs(h_expensive_correct[i]))) {
            printf("Test failed (h_expensive_out[%d]:%f != h_expensive_correct[%d]:%f)!\n",
                i, h_expensive_out[i], i, h_expensive_correct[i]);
            exit(1);
        }
    }
    printf("Test passed!\n");
    //////////////////////////////// CLEANUP /////////////////////////////////
    // free host memory
    free(h_v1);
    free(h_v2);
    free(h_multiply_correct);
    free(h_expensive_correct);
    free(h_multiply_out);
    free(h_expensive_out);
    // free device memory
    cudaFree(d_v1);
    cudaFree(d_v2);
    cudaFree(d_multiply_out);
    cudaFree(d_expensive_out);
    return 0;
}
|
9,678 | #include "includes.h"
// Tiled transpose via shared memory.
// in:  ny rows x nx cols (row-major); out: nx rows x ny cols.
// Requires blockDim == (BDIMX, BDIMY) (macros from includes.h).
// Fixed two defects: (1) __syncthreads() sat inside a divergent branch,
// which is undefined behavior whenever part of a block fails the bounds
// test (partial edge tiles); (2) the store guard reused the *source*
// bounds (ix < nx && iy < ny), which is wrong for non-square edge tiles —
// the destination matrix is nx x ny, so the store must check against those.
__global__ void transposeSmem(float *out, float *in, int nx, int ny)
{
// static shared memory: one BDIMY x BDIMX tile per block
__shared__ float tile[BDIMY][BDIMX];
// coordinate of this thread's element in the original matrix
unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
// linear global memory index for the original matrix
unsigned int ti = iy * nx + ix;
// load the tile, guarded against the right/bottom edge of `in`
if (ix < nx && iy < ny)
tile[threadIdx.y][threadIdx.x] = in[ti];
// barrier OUTSIDE any divergent branch: every thread in the block must
// reach it before the transposed reads below
__syncthreads();
// remap the linear thread id into transposed (irow, icol) tile coordinates
unsigned int bidx, irow, icol;
bidx = threadIdx.y * blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// coordinate of this thread's element in the transposed matrix
unsigned int ox = blockDim.y * blockIdx.y + icol;
unsigned int oy = blockDim.x * blockIdx.x + irow;
// linear global memory index for the transposed matrix (nx rows x ny cols)
unsigned int to = oy * ny + ox;
// store transposed, guarded against the edges of `out`
if (ox < ny && oy < nx)
out[to] = tile[icol][irow];
}
9,679 | class Node;
template<class type>
class Stack;
// Truncate each thread's (x, y, z) float triple to integer coordinates via
// floor(). One thread handles the three consecutive components starting at
// 3 * threadIdx.x (single-block launch layout); d_roots is currently unused.
__global__ void rounding(float *d_f_points, int *d_points, Stack<Node> *d_roots)
{
    const int base = 3 * threadIdx.x;
    for (int c = 0; c < 3; ++c)
        d_points[base + c] = floor(d_f_points[base + c]);
}
9,680 | /** u_p.cu
* \file u_p.cu
* \brief computation of u, p for 2-dim. incompressible Navier-Stokes equation with finite difference
* \author Ernest Yeung
* \email ernestyalumni@gmail.com
* \date 20161209
*
* compilation tip: (compile separately)
* nvcc -std=c++11 -c ./physlib/u_p.cu -o u_p.o
*
*/
/*------------------------------------------------------------------- */
/* Computation of tentative velocity field (F,G) -------------------- */
/*------------------------------------------------------------------- */
/* Compute the tentative x-velocity field F:
 *   F = u + dt * ( Laplacian(u)/Re - d(u^2)/dx - d(uv)/dy )
 * on interior u-cells (1 <= i <= imax-1, 1 <= j <= jmax); on the west (i==0)
 * and east (i==imax) boundary columns F is copied directly from u.
 * All arrays are flattened (imax+2) x (jmax+2) grids with row stride imax+2.
 * `gamma` blends central differencing with a donor-cell (upwind) correction.
 * NOTE(review): the 4.0 / 2.0 literals and fabs() promote the device
 * arithmetic to double; 4.0f / 2.0f / fabsf would keep it single precision. */
__global__ void compute_F(const float deltat,
const float* u, const float* v, float* F,
const int imax, const int jmax, const float deltax, const float deltay,
const float gamma, const float Re) {
const int i = threadIdx.x + (blockDim.x*blockIdx.x) ; // be forewarned; Niemeyer has this "reversed" as the row as indexed differently as (blockIdx.x * blockDim.x)+threadIdx.x + 1
const int j = threadIdx.y + (blockDim.y*blockIdx.y) ; // be forewarned; Niemeyer has this "reversed" as the col as indexed differently as (blockIdx.y * blockDim.y)+threadIdx.y + 1
// outside the padded (imax+2) x (jmax+2) grid: nothing to do
if ((i > (imax+1)) || (j > (jmax+1))) { return; }
if ((i>0)&&(i<=(imax-1))&&(j>0)&&(j<=jmax)) {
// velocities u (5-point stencil around (i,j))
float u_ij = u[i + (imax+2)*j];
float u_ip1j = u[i+1 + (imax+2)*j];
float u_im1j = u[i-1 + (imax+2)*j];
float u_ijp1 = u[i + (imax+2)*(j+1)];
float u_ijm1 = u[i + (imax+2)*(j-1)];
// velocities v (staggered-grid neighbors needed for d(uv)/dy)
float v_ij = v[i + (imax+2)*j];
float v_ip1j = v[i+1 + (imax+2)*j];
float v_ijm1 = v[i + (imax+2)*(j-1)];
float v_ip1jm1 = v[i+1 + (imax+2)*(j-1)];
// finite differences: convective terms with gamma-weighted upwinding
float du2dx, duvdy;
du2dx = ( ((u_ij + u_ip1j) * (u_ij + u_ip1j) - (u_im1j + u_ij) * (u_im1j + u_ij) ) +
gamma * ( fabs( u_ij + u_ip1j) * (u_ij - u_ip1j) -
fabs( u_im1j + u_ij) * (u_im1j - u_ij) ) )/(4.0*deltax) ;
duvdy = ( ((v_ij + v_ip1j)*(u_ij + u_ijp1) - (v_ijm1 + v_ip1jm1)*(u_ijm1 + u_ij) ) +
gamma * ( fabs( v_ij + v_ip1j ) * (u_ij - u_ijp1) -
fabs( v_ijm1 + v_ip1jm1 ) * ( u_ijm1 - u_ij ) ))/ (4.0 * deltay) ;
// calculate the Laplacian, d^2udx^2 + d^2vdy^2, Laplacianu, i.e. Niemeyer's d2udx2, d2udy2
float Laplacianu = (u_ip1j - 2.0*u_ij + u_im1j)/deltax/deltax +
(u_ijp1 - 2.0*u_ij + u_ijm1)/deltay/deltay ;
// explicit Euler step for the tentative velocity
float temp_F = u_ij + deltat * (Laplacianu/Re - du2dx - duvdy );
F[i + (imax+2)*j] = temp_F;
}
/* F at external boundary */
/* ----------------------- */
// eastern boundary or right boundary, F_ij = u_ij
if (i == imax) {
if ((j>=1)&&(j<=jmax)) {
float u_imaxj = u[ imax + (imax+2)*j] ;
F[imax+(imax+2)*j] = u_imaxj ; }
}
// western boundary or left boundary, F_0j = u_0j
if (i == 0) {
if ((j>=1)&&(j<=jmax)) {
float u_0j = u[ 0 + (imax+2)*j] ;
F[0 + (imax+2)*j] = u_0j ; }
}
} // end of compute_F
/* Compute the tentative y-velocity field G (companion of compute_F):
 *   G = v + dt * ( Laplacian(v)/Re - d(uv)/dx - d(v^2)/dy )
 * on interior v-cells (1 <= i <= imax, 1 <= j <= jmax-1); on the south (j==0)
 * and north (j==jmax) boundary rows G is copied directly from v.
 * All arrays are flattened (imax+2) x (jmax+2) grids with row stride imax+2.
 * `gamma` blends central differencing with a donor-cell (upwind) correction.
 * NOTE(review): double literals and fabs() promote to double arithmetic, as
 * in compute_F. */
__global__ void compute_G(const float deltat,
const float* u, const float* v, float* G,
const int imax, const int jmax, const float deltax, const float deltay,
const float gamma, const float Re) {
const int i = threadIdx.x + (blockDim.x*blockIdx.x) ; // be forewarned; Niemeyer has this "reversed" as the row as indexed differently as (blockIdx.x * blockDim.x)+threadIdx.x + 1
const int j = threadIdx.y + (blockDim.y*blockIdx.y) ; // be forewarned; Niemeyer has this "reversed" as the col as indexed differently as (blockIdx.y * blockDim.y)+threadIdx.y + 1
// outside the padded (imax+2) x (jmax+2) grid: nothing to do
if ((i > (imax+1)) || (j > (jmax+1))) { return; }
if ((i>0)&&(i<=imax)&&(j>0)&&(j<=(jmax-1)) ) {
// velocities u (staggered-grid neighbors needed for d(uv)/dx)
float u_ij = u[i + (imax+2)*j];
float u_im1j = u[i-1 + (imax+2)*j];
float u_ijp1 = u[i + (imax+2)*(j+1)];
float u_im1jp1 = u[i-1 + (imax+2)*(j+1)];
// velocities v (5-point stencil around (i,j))
float v_ij = v[i + (imax+2)*j];
float v_ip1j = v[i+1 + (imax+2)*j];
float v_im1j = v[i-1 + (imax+2)*j];
float v_ijp1 = v[i + (imax+2)*(j+1)];
float v_ijm1 = v[i + (imax+2)*(j-1)];
// convective terms with gamma-weighted upwinding
float duvdx = (( (u_ij + u_ijp1)*(v_ij + v_ip1j) - (u_im1j + u_im1jp1)*(v_im1j+v_ij))
+ gamma * ( fabs(u_ij + u_ijp1) * (v_ij - v_ip1j) -
fabs(u_im1j + u_im1jp1) * (v_im1j - v_ij) ))/(4.0*deltax);
float dv2dy = (((v_ij + v_ijp1 )*( v_ij + v_ijp1 ) - ( v_ijm1 + v_ij )*( v_ijm1 + v_ij))
+ gamma * ( fabs( v_ij + v_ijp1) * (v_ij - v_ijp1) -
fabs( v_ijm1 + v_ij ) * ( v_ijm1 - v_ij) ))/(4.0*deltay);
// calculate the Laplacian, d^2vdx^2 + d^2vdy^2, Laplacianu, i.e. Niemeyer's d2vdx2, d2vdy2
float Laplacianv = (v_ip1j -2.0*v_ij + v_im1j)/deltax/deltax +
(v_ijp1 -2.0*v_ij + v_ijm1)/deltay/deltay ;
// explicit Euler step for the tentative velocity
float temp_G = v_ij + deltat * (Laplacianv/Re - dv2dy - duvdx );
G[i + (imax+2)*j] = temp_G;
}
/* G at external boundary */
/* ----------------------- */
// northern boundary or top boundary, G_i,jmax = v_i,jmax
if (j == jmax) {
if ((i>=1)&&(i<=imax)) {
float v_ijmax = v[ i + (imax+2)*jmax] ;
G[i + (imax+2)*jmax] = v_ijmax ; }
}
// southern boundary or bottom boundary, G_i,0 = v_i,0
if (j == 0) {
if ((i>=1)&&(i<=imax)) {
float v_i0 = v[ i + 0 ] ;
G[i + 0 ] = v_i0; }
}
} // end of compute_G
////////////////////////////////////////////////////////////////////////
// copy_press_int /brief copy interior pressure values
/*__host__ void copy_press_int( thrust::device_vector<float> p_all,
thrust::device_vector<float> & p_int,
const int imax, const int jmax) {
for (auto j = 0; j < (jmax+2); ++j) {
for (auto i = 0; i < (imax+2); ++i) {
if ((i>0)&&(i<(imax+1)) && (j>0) && (j<(jmax+1))) {
const int k = (i-1) + imax * (j-1) ;
p_int[k] = p_all[ i + (imax+2)*j ] ;
}
}
}
}
void copy_press_int( const float* p_all, float* p_int,
const int imax, const int jmax) {
for (auto j = 0; j < (jmax+2); ++j) {
for (auto i = 0; i < (imax+2); ++i) {
if ((i>0)&&(i<(imax+1)) && (j>0) && (j<(jmax+1))) {
const int k = (i-1) + imax * (j-1) ;
p_int[k] = p_all[ i + (imax+2)*j ] ;
}
}
}
}
__global__ void sum_pressure( cudaSurfaceObject_t pSurfObj,
const int imax, const int jmax, float* pres_sum) {
const int k_x = threadIdx.x + (blockDim.x*blockIdx.x) ; // be forewarned; Niemeyer has this "reversed" as the row as indexed differently as (blockIdx.x * blockDim.x)+threadIdx.x + 1
const int k_y = threadIdx.y + (blockDim.y*blockIdx.y) ; // be forewarned; Niemeyer has this "reversed" as the col as indexed differently as (blockIdx.y * blockDim.y)+threadIdx.y + 1
const int k = k_x + (imax+2)*k_y; // take note of the "striding" here, or i.e. choice of "stride" as imax+2 here
if ((k_x > (imax+1)) || (k_y > (jmax+1))) { return; }
float temp_val = 0.0; // temporary value
float psq = 0.0; // residual, squared
if ((k_x >= 1)&&(k_x<=imax)) {
if ((k_y >=1)&&(k_y<=jmax)) {
surf2Dread(&temp_val, pSurfObj, k_x*4, k_y);
psq = temp_val * temp_val;
pres_sum[k] = psq; }
}
if ((k_x == 0) || (k_y == 0) || (k_x == (imax+1)) || (k_y == (jmax+1)) ) {
pres_sum[k] = 0.0 ; }
}
*/
/*------------------------------------------------------------------- */
/* Computation of the right hand side of the pressure equation ------ */
/* it's the "RHS" of the Poisson equation involving pressure p */
/*------------------------------------------------------------------- */
/* Build the right-hand side of the pressure Poisson equation:
 *   RHS_ij = ( (F_ij - F_{i-1,j})/dx + (G_ij - G_{i,j-1})/dy ) / dt
 * for interior cells (1..imax) x (1..jmax); all other cells are untouched.
 * Arrays are flattened (imax+2) x (jmax+2) grids with row stride imax+2. */
__global__ void compute_RHS( const float* F, const float* G,
float* RHS,
const int imax, const int jmax,
const float deltat, const float deltax, const float deltay) {
    const int stride = imax + 2;  // row length of the padded grid
    const int i = threadIdx.x + (blockDim.x * blockIdx.x);
    const int j = threadIdx.y + (blockDim.y * blockIdx.y);
    // discard threads outside the padded grid
    if ((i > (imax + 1)) || (j > (jmax + 1))) { return; }
    // only interior cells receive a value
    if ((i < 1) || (i > imax) || (j < 1) || (j > jmax)) { return; }
    const int c = i + stride * j;
    const float dFdx = (F[c] - F[c - 1]) / deltax;       // backward diff in x
    const float dGdy = (G[c] - G[c - stride]) / deltay;  // backward diff in y
    RHS[c] = (dFdx + dGdy) / deltat;
}
/*------------------------------------------------------------------- */
/* SOR iteration for the Poisson equation for the pressure
/*------------------------------------------------------------------- */
/* One Jacobi-style SOR sweep for the pressure Poisson equation. Reads the
 * current pressure p and writes the relaxed value into p_temp (out-of-place,
 * so the sweep is race-free); only interior cells are updated.
 * omega is the relaxation factor; arrays use row stride imax+2. */
__global__ void poisson( const float* p, const float* RHS,
float* p_temp,
const int imax, const int jmax,
const float deltax, const float deltay,
const float omega) {
    const int stride = imax + 2;  // row length of the padded grid
    const int i = threadIdx.x + (blockDim.x * blockIdx.x);
    const int j = threadIdx.y + (blockDim.y * blockIdx.y);
    // discard threads outside the padded grid
    if ((i > (imax + 1)) || (j > (jmax + 1))) { return; }
    // only interior cells are relaxed
    if ((i < 1) || (i > imax) || (j < 1) || (j > jmax)) { return; }
    const int c = i + stride * j;
    const float center = p[c];
    const float east   = p[c + 1];
    const float west   = p[c - 1];
    const float north  = p[c + stride];
    const float south  = p[c - stride];
    const float rhs    = RHS[c];
    // weighted average of neighbors, damped by (1 - omega)
    float relaxed = (1.0-omega)*center +
        (omega/(2.0*(1./deltax/deltax+1./deltay/deltay))) *
        ( (east + west)*(1./deltax/deltax) +
          (north + south)*(1./deltay/deltay) -
          rhs ) ;
    p_temp[c] = relaxed;
}
/* In-place red-black SOR sweep for the pressure Poisson equation: cells with
 * (i+j) even ("red") are relaxed first, then cells with (i+j) odd ("black"),
 * so each half-sweep reads only values the other color just wrote.
 * NOTE(review): __syncthreads() is a *block*-scope barrier — red updates made
 * by other blocks are neither ordered nor guaranteed visible before this
 * block's black phase, so the red/black ordering only holds for single-block
 * launches. Multi-block grids need two separate kernel launches (one per
 * color) or a cooperative-grid sync.
 * NOTE(review): the early `return` below exits out-of-range threads before
 * the barrier; in boundary blocks where only some threads return, reaching
 * __syncthreads() with a partial block is undefined behavior — confirm the
 * launch grid exactly covers (imax+2) x (jmax+2). */
__global__ void poisson_redblack( float* p, const float* RHS,
const int imax, const int jmax,
const float deltax, const float deltay,
const float omega) {
// "striding" needed to "flatten" (i,j) indices
const int Nx = imax+2;
const int i = threadIdx.x + (blockDim.x*blockIdx.x) ; // be forewarned; Niemeyer has this "reversed" as the row as indexed differently as (blockIdx.x * blockDim.x)+threadIdx.x + 1
const int j = threadIdx.y + (blockDim.y*blockIdx.y) ; // be forewarned; Niemeyer has this "reversed" as the col as indexed differently as (blockIdx.y * blockDim.y)+threadIdx.y + 1
if ((i > (imax+1)) || (j > (jmax+1))) { return; }
// phase 1: relax the red cells ((i+j) even) in place
if ((i >= 1)&&(i <=imax)) {
if ((j >=1)&&( j <=jmax)) {
if ( ((i+j) % 2) ==0 ) // red
{
float p_ij = p[ i + Nx *j] ;
float p_ip1j = p[ i +1 + Nx *j] ;
float p_im1j = p[ i -1 + Nx *j] ;
float p_ijp1 = p[ i + Nx * (j+1) ] ;
float p_ijm1 = p[ i + Nx * (j-1) ] ;
float RHS_ij = RHS[ i + Nx * j];
float temp_val = (1.0-omega)*p_ij +
(omega/(2.0*(1./deltax/deltax+1./deltay/deltay))) *
( (p_ip1j + p_im1j)*(1./deltax/deltax) +
(p_ijp1 + p_ijm1)*(1./deltay/deltay) -
RHS_ij ) ; // temp_val is RHS of poisson equation for pressure, in this case
p[ i+Nx*j] = temp_val ; } }
}
// barrier between the two colors (block scope only — see header note)
__syncthreads();
// phase 2: relax the black cells ((i+j) odd), reading fresh red values
if ((i >= 1)&&(i <=imax)) {
if ((j >=1)&&( j <=jmax)) {
if ( ((i+j) % 2) == 1 ) // black
{
float p_ij = p[ i + Nx *j] ;
float p_ip1j = p[ i +1 + Nx *j] ;
float p_im1j = p[ i -1 + Nx *j] ;
float p_ijp1 = p[ i + Nx * (j+1) ] ;
float p_ijm1 = p[ i + Nx * (j-1) ] ;
float RHS_ij = RHS[ i + Nx * j];
float temp_val = (1.0-omega)*p_ij +
(omega/(2.0*(1./deltax/deltax+1./deltay/deltay))) *
( (p_ip1j + p_im1j)*(1./deltax/deltax) +
(p_ijp1 + p_ijm1)*(1./deltay/deltay) -
RHS_ij ) ; // temp_val is RHS of poisson equation for pressure, in this case
p[ i+Nx*j] = temp_val ; } }
}
}
/* ------------------------------------------------------------------ */
/* Computation of residual */
/* ------------------------------------------------------------------ */
/* Store the squared Poisson residual
 *   r_ij = Laplacian(p)_ij - RHS_ij   (5-point stencil)
 * for every interior cell into residualsq_Array; the boundary ring is set to
 * zero so the host can sum the whole array without masking. */
__global__ void compute_residual( const float* p, const float* RHS,
const int imax, const int jmax,
const float deltax, const float deltay,
float* residualsq_Array) {
    const int stride = imax + 2;  // row length of the padded grid
    const int i = threadIdx.x + (blockDim.x * blockIdx.x);
    const int j = threadIdx.y + (blockDim.y * blockIdx.y);
    // discard threads outside the padded grid
    if ((i > (imax + 1)) || (j > (jmax + 1))) { return; }
    const int c = i + stride * j;
    // boundary ring contributes nothing to the residual norm
    const bool onRing = (i == 0) || (j == 0) || (i == (imax + 1)) || (j == (jmax + 1));
    if (onRing) {
        residualsq_Array[c] = 0.0;
        return;
    }
    // interior cell: 5-point Laplacian minus the right-hand side
    const float center = p[c];
    const float east   = p[c + 1];
    const float west   = p[c - 1];
    const float north  = p[c + stride];
    const float south  = p[c - stride];
    const float rhs    = RHS[c];
    float res = (((east - center)-(center - west))/deltax/deltax +
        ((north - center)-(center - south))/deltay/deltay) - rhs;
    residualsq_Array[c] = res * res;
}
/*------------------------------------------------------------------- */
/* computation of new velocity values */
/*------------------------------------------------------------------- */
/* Final velocity update in x:
 *   u_ij = F_ij - dt/dx * (p_{i+1,j} - p_ij)
 * applied on 1 <= i <= imax-1, 1 <= j <= jmax; other cells are untouched. */
__global__ void calculate_u( float* u, const float* p, const float* F,
const int imax, const int jmax, const float deltat, const float deltax ) {
    const int stride = imax + 2;  // row length of the padded grid
    const int i = threadIdx.x + (blockDim.x * blockIdx.x);
    const int j = threadIdx.y + (blockDim.y * blockIdx.y);
    // discard threads outside the padded grid
    if ((i > (imax + 1)) || (j > (jmax + 1))) { return; }
    // u is only defined on the interior x-faces
    if ((i < 1) || (i > (imax - 1)) || (j < 1) || (j > jmax)) { return; }
    const int c = i + stride * j;
    u[c] = F[c] - (p[c + 1] - p[c]) * deltat / deltax;
}
/* Final velocity update in y:
 *   v_ij = G_ij - dt/dy * (p_{i,j+1} - p_ij)
 * applied on 1 <= i <= imax, 1 <= j <= jmax-1; other cells are untouched. */
__global__ void calculate_v( float* v, const float* p, const float* G,
const int imax, const int jmax, const float deltat, const float deltay ) {
    const int stride = imax + 2;  // row length of the padded grid
    const int i = threadIdx.x + (blockDim.x * blockIdx.x);
    const int j = threadIdx.y + (blockDim.y * blockIdx.y);
    // discard threads outside the padded grid
    if ((i > (imax + 1)) || (j > (jmax + 1))) { return; }
    // v is only defined on the interior y-faces
    if ((i < 1) || (i > imax) || (j < 1) || (j > (jmax - 1))) { return; }
    const int c = i + stride * j;
    v[c] = G[c] - (p[c + stride] - p[c]) * deltat / deltay;
}
/* --------------------------------------------------------- */
/* Routines to assist in the */
/* Computation of adaptive time stepsize satisfying */
/* the CFL stability criteria */
/* and set the flag "write" if some data has to be written */
/* into a file. */
/* --------------------------------------------------------- */
/*void calculate_max_uv( thrust::device_vector<float> & max_u_vec, thrust::device_vector<float> & max_v_vec,
const thrust::device_vector<float> u_vec, const thrust::device_vector<float> v_vec ) {
}
* */
|
9,681 | #include <stdio.h>
#include <float.h>
#include <sys/time.h>
// The number of threads per blocks in the kernel
// (if we define it here, then we can use its value in the kernel,
// for example to statically declare an array in shared memory)
const int threads_per_block = 256;
// Forward function declarations
float *GPU_vector_max(float *A, int N);
float *CPU_add_vectors(float *A, float *B, int N);
float *get_random_vector(int N);
float cpu_max(float *A, int N);
float cpu_min(float *A, int N);
float cpu_mean(float *A, int N);
float cpu_stdev(float *A, int N);
long long start_timer();
long long stop_timer(long long start_time, char *name);
void die(char *message);
// Driver: compute max/min/mean/stdev of a random vector on CPU and GPU,
// report timings, and verify the GPU results against the CPU reference.
int main(int argc, char **argv) {
    // Seed the random generator (constant seed => repeatable results)
    srand(10);

    // Vector length: default 100000, optionally overridden on the command line
    int N = 100000;
    if (argc > 1) N = atoi(argv[1]);

    // Generate the input vector
    long long vector_start_time = start_timer();
    float *A = get_random_vector(N);
    stop_timer(vector_start_time, "Vector generation");

    // CPU reference statistics
    long long cpu_start_time = start_timer();
    float max_val = cpu_max(A, N);
    float min_val = cpu_min(A, N);
    float mean_val = cpu_mean(A, N);
    float stdev_val = cpu_stdev(A, N);
    long long CPU_time = stop_timer(cpu_start_time, "CPU");

    // GPU statistics (returns a malloc'd {max, min, mean, stdev} array)
    long long GPU_start_time = start_timer();
    float *result = GPU_vector_max(A, N);
    long long GPU_time = stop_timer(GPU_start_time, "\t Total");

    printf("*************** R E S U L T S **************\n");
    printf("GPU Max :\t%f\n",result[0]);
    printf("CPU Max :\t%f\n", max_val);
    printf("GPU Min :\t%f\n",result[1]);
    printf("CPU Min :\t%f\n", min_val);
    printf("GPU Mean :\t%f\n",result[2]);
    printf("CPU Mean :\t%f\n", mean_val);
    printf("GPU sigma:\t%f\n",result[3]);
    printf("CPU sigma:\t%f\n", stdev_val);
    printf("*******************************************\n");

    // Compute the speedup or slowdown
    if (GPU_time > CPU_time) printf("\nCPU outperformed GPU by %.2fx\n", (float) GPU_time / (float) CPU_time);
    else printf("\nGPU outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);

    // Check the correctness of the GPU results (4 statistics compared)
    int num_wrong = 0;
    if (fabs(max_val - result[0]) > 0.000001) num_wrong++;
    if (fabs(min_val - result[1]) > 0.000001) num_wrong++;
    if (fabs(mean_val - result[2]) > 0.000001) num_wrong++;
    if (fabs(stdev_val - result[3]) > 0.000001) num_wrong++;

    // BUG FIX: the original printed "num_wrong / N", but only 4 statistics
    // are checked — report out of 4.
    if (num_wrong) printf("\n%d / 4 values incorrect\n", num_wrong);
    else printf("\nAll values correct\n");

    // BUG FIX: the original leaked both the input vector and the result array
    free(A);
    free(result);
    return 0;
}
// A GPU kernel that computes the vector sum A + B
// (each thread computes a single value of the result)
// Tree-reduction kernel: reduces each block's slice of max/min/mean (the
// `mean` array holds running sums) to one partial per block, written back in
// place at index block_id. The host relaunches until one element remains.
// Assumes threads_per_block is a power of two.
__global__ void vector_max(float *max, float *min, float *mean, int N) {
    // Flattened block/thread id for (x, y) grids
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;
    const int MAX = 0;
    const int MIN = 1;
    const int MEAN = 2;

    // Per-block scratch: one {max, min, sum} triple per thread
    __shared__ float values[threads_per_block][3];

    // Stage into shared memory; pad the tail with each operator's identity.
    if (thread_id < N) {
        values[threadIdx.x][MAX] = max[thread_id];
        values[threadIdx.x][MIN] = min[thread_id];
        values[threadIdx.x][MEAN] = mean[thread_id];
    } else {
        // BUG FIX: the original padded with FLT_MIN, which is the smallest
        // *positive* normalized float (~1.2e-38), not the most negative
        // value — it corrupts the max of all-negative inputs. -FLT_MAX is
        // the correct identity for the max operator.
        values[threadIdx.x][MAX] = -FLT_MAX;
        values[threadIdx.x][MIN] = FLT_MAX;
        values[threadIdx.x][MEAN] = 0;
    }
    __syncthreads();

    // Binary tree reduction; the barrier sits outside the divergent branch.
    int th = threads_per_block / 2;
    while (th > 0) {
        if (threadIdx.x < th) {
            // max of the pair
            if (values[threadIdx.x][MAX] < values[threadIdx.x + th][MAX]) {
                values[threadIdx.x][MAX] = values[threadIdx.x + th][MAX];
            }
            // min of the pair
            if (values[threadIdx.x][MIN] > values[threadIdx.x + th][MIN]) {
                values[threadIdx.x][MIN] = values[threadIdx.x + th][MIN];
            }
            // running sum (host divides by N at the end to get the mean)
            values[threadIdx.x][MEAN] += values[threadIdx.x + th][MEAN];
        }
        th /= 2;
        __syncthreads();
    }

    // Thread 0 publishes this block's partial results
    if (threadIdx.x == 0) {
        max[block_id] = values[0][MAX];
        min[block_id] = values[0][MIN];
        mean[block_id] = values[0][MEAN];
    }
}
// A GPU kernel that computes the standard deviation given a vector, its length, and its mean
// (each thread computes a single value of the result)
// Tree-reduction kernel for the standard deviation. On the first call
// (first == 1) each thread squares its element's deviation from the mean;
// later calls just sum previously reduced partials. Results accumulate in
// place; the host relaunches until vals[0] holds the total.
// BUG FIX: `mean` was declared `int`, silently truncating the float mean the
// caller passes (GPU_vector_max passes result[2]) and skewing the variance.
__global__ void vector_std(float *vals, float mean, int N, int first) {
    // Flattened block/thread id for (x, y) grids
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;

    __shared__ float values[threads_per_block];

    // Stage into shared memory; pad the tail with the additive identity.
    if (thread_id < N) {
        if (first == 1) {
            // first pass: squared deviation from the mean
            values[threadIdx.x] = (vals[thread_id] - mean) * (vals[thread_id] - mean);
        } else {
            // later passes: sum the per-block partials from the previous pass
            values[threadIdx.x] = vals[thread_id];
        }
    } else {
        values[threadIdx.x] = 0;
    }
    __syncthreads();

    // Binary tree reduction (threads_per_block is a power of two)
    int th = threads_per_block / 2;
    while (th > 0) {
        if (threadIdx.x < th) {
            values[threadIdx.x] += values[threadIdx.x + th];
        }
        th /= 2;
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum
    if (threadIdx.x == 0) {
        vals[block_id] = values[0];
    }
}
// Returns the max value of vector A (computed on the GPU)
// Computes max, min, mean, and standard deviation of A_CPU[0..N-1] on the GPU.
// Returns a malloc'd 4-element array {max, min, mean, stdev}; caller frees it.
float *GPU_vector_max(float *A_CPU, int N) {
    long long memory_start_time = start_timer();

    // The reductions run in place, so each statistic gets its own copy of
    // the input on the device.
    int vector_size = N * sizeof(float);
    int vector_length = N;  // N itself is consumed by the reduction loops
    float *max_GPU;
    float *min_GPU;
    float *mean_GPU;
    float *std_GPU;
    if (cudaMalloc((void **) &max_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
    if (cudaMalloc((void **) &min_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
    if (cudaMalloc((void **) &mean_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");
    if (cudaMalloc((void **) &std_GPU, vector_size) != cudaSuccess) die("Error allocating GPU memory");

    // Transfer the input vector to each working buffer
    cudaMemcpy(max_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice);
    cudaMemcpy(min_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice);
    cudaMemcpy(mean_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice);
    cudaMemcpy(std_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice);
    stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");

    // BUG FIX: ceil-division was written (N + tpb - 2) / tpb, which is one
    // block short whenever N % tpb == 1 (e.g. N = 257 with 256 threads per
    // block), silently dropping the tail element. The standard
    // (N + tpb - 1) / tpb form is used throughout; loop termination is now
    // `while (N > 1)` instead of relying on the off-by-one reaching zero.
    int num_blocks = (N + threads_per_block - 1) / threads_per_block;
    int max_blocks_per_dimension = 65535;
    dim3 grid_size(1, 1, 1);
    if (num_blocks > max_blocks_per_dimension) {
        grid_size.x = max_blocks_per_dimension;
        grid_size.y = num_blocks / max_blocks_per_dimension + 1;
    } else {
        grid_size.x = num_blocks;
        grid_size.y = 1;
    }

    float *result = (float *) malloc(4 * sizeof(float));
    if (result == NULL) die("Error allocating CPU memory");

    // Pass 1: tree-reduce max/min/sum until a single value sits at index 0
    long long first_kernel_start_time = start_timer();
    while (N > 1) {
        vector_max <<< grid_size , threads_per_block >>> (max_GPU, min_GPU, mean_GPU, N);
        cudaDeviceSynchronize(); // timing only (cudaThreadSynchronize is deprecated)
        N = num_blocks; // each block produced one partial result
        num_blocks = (N + threads_per_block - 1) / threads_per_block;
        if (num_blocks > max_blocks_per_dimension) {
            grid_size.x = max_blocks_per_dimension;
            grid_size.y = num_blocks / max_blocks_per_dimension + 1;
        } else {
            grid_size.x = num_blocks;
            grid_size.y = 1;
        }
    }
    stop_timer(first_kernel_start_time, "\t Min,Max,Mean kernel execution");

    // Transfer the reduced results from the GPU to the CPU
    memory_start_time = start_timer();
    cudaMemcpy(&result[0], max_GPU, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&result[1], min_GPU, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&result[2], mean_GPU, sizeof(float), cudaMemcpyDeviceToHost);
    result[2] = result[2] / vector_length; // running sum -> mean
    stop_timer(memory_start_time, "\t Transfer from GPU");

    cudaFree(max_GPU);
    cudaFree(min_GPU);
    cudaFree(mean_GPU);

    // Pass 2: stdev in two phases — the first launch squares deviations from
    // the mean, later launches sum the per-block partials.
    N = vector_length;
    num_blocks = (N + threads_per_block - 1) / threads_per_block;
    if (num_blocks > max_blocks_per_dimension) {
        grid_size.x = max_blocks_per_dimension;
        grid_size.y = num_blocks / max_blocks_per_dimension + 1;
    } else {
        grid_size.x = num_blocks;
        grid_size.y = 1;
    }
    int first = 1;
    long long second_kernel_start_time = start_timer();
    // `first == 1` forces at least one launch so the (x - mean)^2 transform
    // is applied even when N == 1.
    while (first == 1 || N > 1) {
        vector_std <<< grid_size , threads_per_block >>> (std_GPU, result[2], N, first);
        cudaDeviceSynchronize(); // timing only
        first++; // never re-apply the mean subtraction and squaring
        N = num_blocks;
        num_blocks = (N + threads_per_block - 1) / threads_per_block;
        if (num_blocks > max_blocks_per_dimension) {
            grid_size.x = max_blocks_per_dimension;
            grid_size.y = num_blocks / max_blocks_per_dimension + 1;
        } else {
            grid_size.x = num_blocks;
            grid_size.y = 1;
        }
    }
    stop_timer(second_kernel_start_time, "\tStdev Kernel execution");

    // Check for kernel errors
    cudaError_t error = cudaGetLastError();
    if (error) {
        char message[256];
        sprintf(message, "CUDA error: %s", cudaGetErrorString(error));
        die(message);
    }

    memory_start_time = start_timer();
    cudaMemcpy(&result[3], std_GPU, sizeof(float), cudaMemcpyDeviceToHost);
    result[3] = sqrt(result[3] / vector_length); // sum of squares -> stdev
    stop_timer(memory_start_time, "\tTransfer from GPU");

    cudaFree(std_GPU);
    return result;
}
// Returns the vector sum A + B
// Element-wise sum of two N-element vectors on the CPU.
// Returns a freshly malloc'd result; the caller owns (and frees) it.
float *CPU_add_vectors(float *A, float *B, int N) {
    float *sum = (float *) malloc(N * sizeof(float));
    if (sum == NULL) die("Error allocating CPU memory");
    int i;
    for (i = 0; i < N; i++) {
        sum[i] = A[i] + B[i];
    }
    return sum;
}
// Returns a randomized vector containing N elements
// Allocates and returns an N-element vector filled with rand()/rand() ratios.
// Dies on N < 1 or allocation failure; the caller owns the returned buffer.
float *get_random_vector(int N) {
    if (N < 1) die("Number of elements must be greater than zero");
    float *vec = (float *) malloc(N * sizeof(float));
    if (vec == NULL) die("Error allocating CPU memory");
    int i;
    for (i = 0; i < N; i++) {
        vec[i] = (float) rand() / (float) rand();
    }
    return vec;
}
// Returns the current time in microseconds
// Current wall-clock time in microseconds since the epoch.
long long start_timer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
// Prints the time elapsed since the specified time
// Prints and returns the elapsed time (microseconds) since start_time,
// labelled with `name`.
long long stop_timer(long long start_time, char *name) {
    struct timeval now;
    gettimeofday(&now, NULL);
    long long stop_time = now.tv_sec * 1000000 + now.tv_usec;
    long long elapsed = stop_time - start_time;
    printf("%s: %.5f sec\n", name, ((float) elapsed) / (1000 * 1000));
    return elapsed;
}
// Largest element of A[0..N-1] (assumes N >= 1).
float cpu_max(float *A, int N){
    float best = A[0];
    int i;
    for (i = 1; i < N; i++) {
        best = (A[i] > best) ? A[i] : best;
    }
    return best;
}
// Smallest element of A[0..N-1] (assumes N >= 1).
float cpu_min(float *A, int N){
    float best = A[0];
    int i;
    for (i = 1; i < N; i++) {
        best = (A[i] < best) ? A[i] : best;
    }
    return best;
}
// Arithmetic mean of A[0..N-1] (assumes N >= 1).
float cpu_mean(float *A, int N){
    float total = A[0];
    int i;
    for (i = 1; i < N; i++) {
        total += A[i];
    }
    return total / N;
}
// Population standard deviation of A[0..N-1] (divides by N, not N-1).
// The mean computation mirrors cpu_mean (sum seeded with A[0]) so results
// match the rest of the file bit-for-bit.
float cpu_stdev(float *A, int N){
    float total = A[0];
    for (int i = 1; i < N; i++) {
        total += A[i];
    }
    float mean = total / N;
    // accumulate squared deviations from the mean
    float sum_diff_squared = 0.0;
    for (int i = 0; i < N; ++i) {
        sum_diff_squared += (A[i] - mean) * (A[i] - mean);
    }
    return sqrt(sum_diff_squared / N);
}//eo cpu_stdev
// Prints the specified message and quits
// Prints the message (with trailing newline) and terminates with failure.
void die(char *message) {
    puts(message);
    exit(1);
}
|
9,682 | #include "includes.h"
// Dot product of row `row` of matrix_a with column `col` of matrix_b,
// both NxN row-major matrices.
__device__ float rowcol_dot(float * matrix_a, float * matrix_b, int row, int col, int N)
{
    float acc = 0;
    const float *a_row = matrix_a + row * N;  // start of the selected row
    for (int k = 0; k < N; k++)
        acc += a_row[k] * matrix_b[k * N + col];
    return acc;
}
// NxN matrix product: one thread per output element, with x indexing rows
// and y indexing columns of the row-major output.
__global__ void matrix_mult_ker(float * matrix_a, float * matrix_b, float * output_matrix, int N)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard the partial tile at the grid edge — without this,
    // threads past the matrix bounds read and write out of range whenever
    // N is not a multiple of the block dimensions.
    if (row < N && col < N)
        output_matrix[col + row * N] = rowcol_dot(matrix_a, matrix_b, row, col, N);
}
9,683 | #include "includes.h"
// Zeroes the cell arrays a and oA and sets a uniform cell width totalSize/n
// on x, covering n interior cells plus `ghosts` padding cells on each side.
// NOTE(review): the loop strides by blockDim.x only (not gridDim.x *
// blockDim.x), matching the original — presumably a single-block launch;
// confirm before launching multiple blocks.
__global__ void initialize(float* a, float* oA, float* x, float totalSize, int n, int ghosts){
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    const int total = n + 2 * ghosts;
    const float width = totalSize / n;  // same value the original recomputed per cell
    for (int idx = tid; idx < total; idx += blockDim.x) {
        a[idx] = 0;
        oA[idx] = 0;
        x[idx] = width;
    }
}
9,684 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#define N 6
#define UPPER 1
#define LOWER N*4
#define THREADS_PER_BLOCK 1
#define BLOCKS 1 //(N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK
void rand_init_array(int *a, int n, int upper, int lower);
void display_array(int *a, int n);
// Initializes one curand generator state per thread, seeded from the device
// clock so each run differs.
__global__ void setup_kernel(curandState *state){
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    curand_init(clock64(), tid, 0, &state[tid]);
}
// Uniform random float in (0, 1] drawn from the given generator state.
__device__ float getnextrand(curandState *state){
    return curand_uniform(state);
}
// Uniform random integer in [0, scale - 1].
// BUG FIX: curand_uniform() returns values in (0, 1], so scale * u can equal
// `scale` exactly; the original could therefore return `scale`, which callers
// (swap_random) use directly as an array index — out of bounds. Clamp to
// scale - 1. (The original also cast only `scale` to int, leaving truncation
// to the implicit float-to-int return conversion.)
__device__ int getnextrandscaled(curandState *state, int scale){
    int r = (int) (getnextrand(state) * scale);
    return (r < scale) ? r : scale - 1;
}
/*
* Function: swap_random
* --------------------
* Randomizes elements of array
*
* a: the array (integer)
* i: the index of element that will be swapped
* n: number of elements in the array
*
*/
// Swaps a[idx] with a randomly chosen a[r] (r in [0, N-1]).
// The exchange into a[idx] is atomic, but the pair of accesses is not atomic
// as a whole — concurrent swappers can interleave (acceptable for bogo sort).
__device__ void swap_random(int *a, curandState *state){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int r = getnextrandscaled(state, N);
    a[r] = atomicExch(&(a[idx]), a[r]);
    // BUG FIX: removed leftover debug `printf("%d ", r)` — it serialized
    // every swap and flooded stdout inside the sort's hot loop.
}
/*
* Function: is_sorted
* --------------------
* Checks if array is sorted
*
* a: the array (integer)
* n: number of elements in the array
*
*/
// Returns 1 iff a[0..n-1] is strictly increasing; duplicates count as
// unsorted (a[k] <= a[k-1] fails, same as the original's `<` || `==` test).
__device__ int is_sorted(int *a, int n){
    for (int k = n - 1; k >= 1; k--)
        if (a[k] <= a[k-1]) return 0;
    return 1;
}
/*
* Function: bogo_sort
* --------------------
* Performs bogo sort (random suffle until the array is sorted)
*
* a: the array (integer)
* n: number of elements in the array
*
*/
// Each in-range thread loops: perform one random swap, then scan the whole
// array for sortedness, until any thread observes a sorted permutation and
// sets found[0].
// NOTE(review): found[0] is read and written by all threads without atomics
// or fences, and every thread shares state[...] via swap_random — benign for
// the configured 1-block/1-thread launch, but racy for larger launches;
// confirm the launch configuration before scaling up.
__global__ void bogo_sort(int *a, int n, int *found, curandState *state){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < n){
while(!found[0]){
// one random swap, then a full O(n) sortedness check
swap_random(a, state);
found[0] = is_sorted(a, n);
}
}
}
/*
* Main
*/
/*
 * Main: time a GPU bogo sort of N random integers and print the result.
 */
int main(int argc, char *argv[]){
    float total_time, comp_time;
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);

    /* Per-thread curand state, seeded on the device */
    curandState *d_state;
    cudaMalloc(&d_state, sizeof(curandState));
    setup_kernel<<< BLOCKS, THREADS_PER_BLOCK >>>(d_state);

    /*
     * Memory allocation on host.
     * BUG FIX: the original declared `int *found = {0};`, which initializes a
     * NULL *pointer*, then cudaMemcpy'd from it — undefined behavior. Use a
     * real one-element host buffer instead.
     */
    int *array = (int *)malloc(N*sizeof(int));
    int found[1] = {0};

    /* Init and show the unsorted array */
    rand_init_array(array, N, UPPER, LOWER);
    display_array(array, N);

    /* Memory allocation on device */
    int *array_dev, *found_dev;
    cudaMalloc((void **)&array_dev, N*sizeof(int));
    cudaMalloc((void **)&found_dev, 1*sizeof(int));

    cudaEventRecord(total_start);
    /* Copy inputs from host memory to device memory */
    cudaMemcpy(array_dev, array, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(found_dev, found, 1*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(comp_start);
    /* Kernel call */
    bogo_sort<<< BLOCKS, THREADS_PER_BLOCK >>>(array_dev, N, found_dev, d_state);
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);

    /* Copy the sorted array back to host memory */
    cudaMemcpy(array, array_dev, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);

    /*
     * Free device memory.
     * BUG FIX: found_dev and d_state leaked in the original.
     */
    cudaFree(array_dev);
    cudaFree(found_dev);
    cudaFree(d_state);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);

    /* GPU timing */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, BLOCKS, THREADS_PER_BLOCK*BLOCKS);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);
    display_array(array, N);

    /* BUG FIX: free the host array (leaked in the original) */
    free(array);
    return 0;
}
/*
* Function: rand_init_array
* --------------------
* Fills an integer array with random numbers
*
* a: the array that will be filled with numbers
* n: number of elements in the array
* upper: highest value of random number
* lower: lowest value of random number
*
*/
/* Fill a[0..n-1] with pseudo-random integers drawn uniformly (modulo rand()
 * bias) from the inclusive range [lower, upper]. Uses the host RNG, so seed
 * with srand() beforehand for reproducibility. */
void rand_init_array(int *a, int n, int upper, int lower){
    const int span = upper - lower + 1;  /* number of distinct values */
    for (int idx = 0; idx < n; ++idx) {
        a[idx] = lower + rand() % span;
    }
}
/*
* Function: display_array
* --------------------
* Prints an integer array to user
*
* a: the array that will be printed
* n: number of elements in the array
*
*/
/* Print the n elements of a on a single line, space-separated, followed by
 * a newline. */
void display_array(int *a, int n){
    for (int idx = 0; idx < n; ++idx) {
        printf("%d ", a[idx]);
    }
    printf("\n");
}
/*Programmer: Chris Tralie
Purpose: To create fast 2D convolutional matrix multiplication code
as parallel CUDA kernels
*/
__global__ void MatMulNaive(float* A, float* B, float* C, int M, int K, int N) {
    /*
    Naive dense matrix multiply C = A*B, one thread per output element.
    A: MxK matrix (row major)
    B: KxN matrix (row major)
    C: MxN matrix (row major)
    Expected launch: gridDim.x covers M (i = row), blockDim.x covers N (j = col).
    */
    int i = blockIdx.x;
    int j = threadIdx.x;
    // Guard: without this, any launch whose grid/block exceeds the matrix
    // dimensions reads and writes out of bounds.
    if (i >= M || j >= N) {
        return;
    }
    float res = 0.0f;
    for (int k = 0; k < K; k++) {
        res += A[i*K + k]*B[k*N+j];
    }
    C[i*N+j] = res;
}
__global__ void MatMulConv2D(float* W, float* H, float* Lam, int M, int N, int K,
int T, int F, int TBlocks, int FBlocks) {
/*
Perform 2D convolutional matrix multiplication
:param W: An TxMxK input matrix
:param H: A FxKxN input matrix
:param Lam: A MxN output matrix
:param M, N, K, T, F: Dimensions
:param TBlocks: Number of blocks of T padding to load in per grid block
:param FBlocks: Number of blocks of F padding to load in per grid block
*/
/*Shared Memory Layout in x, which holds chunks of W and H that are
shared with overlapping convolutions. For a block size of B:
1) W goes from i-F:i+B-1, k, 0:T-1.
2) H goes from k, j-T:j+B-1, 0:F at an offset of (F+B)*T
*/
// NOTE(review): from the index math below, the launch must supply dynamic
// shared memory for (F+blockDim.x)*T + (T+blockDim.y)*F floats, and the
// grid must cover M in x and N in y -- TODO confirm against the host launch.
extern __shared__ float x[];
int hoff = (F+blockDim.x)*T; //Offset of H chunk in shared memory
//TODO: Think about row major coalescing with order of access
int iblock = blockIdx.x*blockDim.x;
int jblock = blockIdx.y*blockDim.y;
// (i, j) is this thread's output coordinate in Lam
int i = iblock + threadIdx.x;
int j = jblock + threadIdx.y;
int MK = M*K;
int KN = K*N;
int k, t, f;
int thist, thisf;
int thisi, thisj;
float res = 0.0;
//Loop over all K separately
for (k = 0; k < K; k++) {
//Step 1: Load chunks of W into shared memory
//W goes from iblock-F+1:iblock+B-1, k, 0:T-1
for (f = 0; f < FBlocks+1; f++) {
if (f == FBlocks) {
//On the last one, copy over interval from [iblock, iblock+B-1]
thisi = i;
thisf = F+threadIdx.x;
}
else{
//For the other chunks, copy over interval from [iblock-F, iblock-1]
thisi = i-F+f*blockDim.x;
if (thisi >= iblock) {
continue; //Past F boundary for block at iblock-1
}
thisf = f*blockDim.x+threadIdx.x;
}
for (t = 0; t < TBlocks; t++) {
thist = t*blockDim.y + threadIdx.y;
if (thist >= T) {
continue;
}
//Pull out W[thisi, k, thist]; out-of-range rows are zero-padded
if (thisi < 0 || thisi >= M) {
x[T*thisf+thist] = 0;
}
else {
//x[T*thisf+thist] = W[thisi*KT+k*T+thist];
x[T*thisf+thist] = W[thist*MK+thisi*K+k];
}
}
}
__syncthreads();
//Step 2: Load chunks of H into shared memory
//H goes from k, jblock-T+1:jblock+B-1, 0:F at an offset of (F+B)*T
for (t = 0; t < TBlocks+1; t++) {
if (t == TBlocks) {
//On the last one, copy over interval from [jblock:jblock+B-1]
thisj = j;
thist = T+threadIdx.y;
}
else {
//For the other chunks, copy over interval from [jblock-T:jblock-1]
thisj = j-T+t*blockDim.y;
if (thisj >= jblock) {
continue; //Past T boundary for block at jblock-1
}
thist = t*blockDim.y+threadIdx.y;
}
for (f = 0; f < FBlocks; f++) {
thisf = f*blockDim.x + threadIdx.x;
if (thisf >= F) {
continue;
}
//Pull out H[k, thisj, f] and put in at an offset;
//out-of-range columns are zero-padded
if (thisj < 0 || thisj >= N) {
x[hoff + F*thist+thisf] = 0;
}
else{
//x[hoff + F*thist+thisf] = H[k*NF + thisj*F + thisf];
x[hoff + F*thist+thisf] = H[thisf*KN + k*N + thisj];
}
}
}
__syncthreads();
//Step 3: Do matrix multiplication
//Accumulate sum over (f, t) of W[i-f, k, t]*H[k, j-t, f] for this k,
//reading both factors from the zero-padded shared-memory chunks.
for (f = 0; f < F; f++) {
for (t = 0; t < T; t++) {
//W[i-f, k, t]*H[k, j-t, f]
res += x[(F+threadIdx.x-f)*T + t]*x[hoff+(T+threadIdx.y-t)*F+f];
}
}
__syncthreads();//The lack of this sync at the end of each k
// was causing a major bug!!!
}
// Threads outside the output matrix still participated in the barriers
// above; only in-range threads write their accumulated result.
if (i < M && j < N) {
Lam[i*N+j] = res;
}
}
|
9,686 | #include<iostream>
#include<math.h>
#include<stdint.h>
#include<stdlib.h>
#define N 16
#define M 16
/*
each thread of this function handles one of the 25 convolution procedures
necessary for a 5x5 box filter(aka kernel). 25 pixels from the original
image must each be matched to one of the 25 values from the box filter,
and it must be done through use of their individual threadId.
*/
/*
One thread per tap of the 5x5 box filter: thread (tx, ty) weighs the input
pixel at (i+tx-2, j+ty-2) by kernel[tx + 5*ty] and folds both the weighted
pixel value and the weight itself into *numer / *denom with atomicAdd.
Taps that fall outside the NxM image are skipped entirely, so the caller's
later division by *denom automatically renormalizes near the borders.
*/
__global__
void convolve(uint8_t input[N][M],
int *numer,
int* denom,
int* kernel,
int i,
int j)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    /* Image coordinates sampled by this thread (filter centered on (i, j)). */
    const int px = i + tx - 2;
    const int py = j + ty - 2;
    /* Discard taps outside the image: require 0 <= px < N and 0 <= py < M. */
    if (px < 0 || py < 0 || px >= N || py >= M)
        return;
    /* Weight for this tap, then accumulate numerator and denominator.
       atomicAdd keeps concurrent accumulations from clobbering each other. */
    const int w = kernel[tx + ty * 5];
    atomicAdd(numer, w * int(input[px][py]));
    atomicAdd(denom, w);
}
/*
 * Apply a 5x5 Gaussian blur to `input`, writing the result into `output`.
 * One kernel launch (a single 5x5 block, one thread per filter tap) is done
 * per output pixel; the managed accumulators *numer / *denom collect the
 * weighted sum and the total in-bounds weight, and the host does the final
 * division.
 * NOTE(review): `input` must be device-accessible (the caller allocates it
 * with cudaMallocManaged); `output` is written only on the host.
 */
void gauss(uint8_t input[N][M], uint8_t output[N][M])
{
    // Hard-coded standard 5x5 Gaussian filter, staged into managed memory
    // so the device can read it. Generating filters of other sizes/sigmas
    // would be a natural upgrade.
    int* kernel;
    cudaMallocManaged(&kernel, sizeof(int) * 25);
    int dummy[25] = { 1, 4, 7, 4, 1,
                      4,16,26,16, 4,
                      7,26,41,26, 7,
                      4,16,26,16, 4,
                      1, 4, 7, 4, 1 };
    for (int i=0; i<25; i++)
        kernel[i] = dummy[i];
    // Managed accumulators shared between host and the convolve kernel.
    int *numer;
    int *denom;
    cudaMallocManaged(&numer, sizeof(int));
    cudaMallocManaged(&denom, sizeof(int));
    // 25 threads per block, arranged 5x5 to mirror the filter layout.
    dim3 blockSize(5,5);
    // BUG FIX: the original iterated `j` over N and `i` over M while convolve
    // bounds i (the row) by N and j (the column) by M, and then wrote
    // output[i][j] with i running to M. That only worked because N == M.
    // The loops below use the correct bounds, so the code is also right for
    // non-square images.
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < M; j++)
        {
            *numer = 0;
            *denom = 0;
            convolve<<<1,blockSize>>>(input, numer, denom, kernel, i, j);
            // Wait for the kernel so *numer / *denom are final before use.
            cudaDeviceSynchronize();
            // denom is never zero: the center tap (weight 41) is always
            // inside the image.
            output[i][j] = uint8_t((*numer) / (*denom));
        }
    }
    cudaFree(kernel);
    cudaFree(numer);
    cudaFree(denom);
}
/*
print function for the values of a matrix of unsigned 8 bit ints,
otherwise known as the data values of a greyscale image.
*/
/*
Dump an NxM greyscale image to stdout: each pixel as its integer value
followed by ",\t", one image row per output line.
*/
void print(uint8_t image[N][M])
{
    for (int r = 0; r < N; ++r)
    {
        for (int c = 0; c < M; ++c)
            std::cout << int(image[r][c]) << ",\t";
        std::cout << "\n";
    }
}
int main()
{
    // Seed the host RNG. BUG FIX: the original called srand(NULL) -- NULL is
    // a pointer constant passed where an unsigned is expected (it still
    // meant seed 0); use the explicit seed value instead.
    srand(0u);
    uint8_t *image, blur[N][M];
    cudaMallocManaged(&image, N*M*sizeof(uint8_t));
    // Fill the managed image with random greyscale values.
    for (int i = 0; i<N; i++)
        for (int j = 0; j<M; j++)
            reinterpret_cast<uint8_t (*)[M]>(image)[i][j] = rand()% 256;
    /*
    cudaMallocManaged has certain limitations when it comes to 2D arrays
    so image has been allocated as a 1D array and then cast to a 2D.
    blur doesn't need to be allocated to global mem (doesn't run on device
    code), so it's declared locally as a 2D array and passed as such.
    */
    print(reinterpret_cast<uint8_t (*)[M]>(image));
    gauss(reinterpret_cast<uint8_t (*)[M]>(image), blur);
    std::cout<<"\n";
    print(blur);
    cudaFree(image);
    // BUG FIX: the original also called cudaFree(blur). `blur` is a host
    // stack array that was never allocated with a cudaMalloc* call, so
    // freeing it is invalid (cudaErrorInvalidValue).
    return 0;
}
|
9,687 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
// Uses Cyclic Reduction (CR) algorithm on GPU to solve the system of equations.
// Forward-reduction phase of cyclic reduction for the n x n system
// d_A * x = d_F, with d_A stored densely in row-major order.
// Thread i handles reduction level i: it eliminates rows
// j = 2^(i+1)-1, 2*2^(i+1)-1, ... by combining them with the neighbor rows
// at distance 2^i on either side.
// Expected launch (from main below): <<<1, n>>>, so n == blockDim.x.
// NOTE(review): level i+1 reads rows that level i writes, but there is no
// synchronization between the threads handling different levels here --
// this looks like a data race; confirm results are scheduling-independent.
// NOTE(review): loop bounds and offsets are computed via powf (float);
// exact for the tiny exponents used here, but integer shifts (1 << (i+1))
// would be safer.
__global__ void FwdReduction(float* d_A, float* d_F)
{
float alpha,gamma;
int index1,index2,offset;
int i=blockIdx.x*blockDim.x+threadIdx.x;
// n: system size, taken from the launch configuration (one thread per row).
int n=blockDim.x;
// Forward reduction
for (int j=powf(2,i+1)-1;j<n;j+=powf(2,i+1))
{
offset=powf(2.0f,(float)i);
index1=j-offset;
index2=j+offset;
// Debug trace of the rows being combined at this level.
printf("%d %d %d\n",index1,index2,i);
// Elimination multipliers for the rows above and below row j.
alpha=d_A[n*j+index1]/d_A[n*index1+index1];
gamma=d_A[n*j+index2]/d_A[n*index2+index2];
for (int k=0;k<n;k++)
{
d_A[n*j+k]-=(alpha*d_A[n*index1+k]+gamma*d_A[n*index2+k]);
}
// Apply the same combination to the right-hand side.
d_F[j]-=(alpha*d_F[index1]+gamma*d_F[index2]);
}
}
// Back-substitution phase of cyclic reduction: recovers the solution vector
// d_x from the reduced system produced by FwdReduction. The middle unknown
// is solved directly, then each thread i back-substitutes the rows of its
// reduction level.
// Expected launch (from main below): <<<1, n>>>, so n == blockDim.x.
// NOTE(review): the __syncthreads() calls below sit inside a loop whose trip
// count depends on i, so different threads can reach different numbers of
// barriers -- undefined behavior per the programming guide; confirm intent.
__global__ void BackSub(float* d_A, float* d_x, float* d_F)
{
int index1,index2,offset;
int i=blockIdx.x*blockDim.x+threadIdx.x;
int n=blockDim.x;
// The only unknown left after full forward reduction is the middle one.
int index=(n-1)/2;
d_x[index]=d_F[index]/d_A[n*index+index];
for (int j=powf(2,i+1)-1;j<n;j+=powf(2,i+1)) {
offset=powf(2,i);
index1=j-offset;
index2=j+offset;
d_x[index1]=d_F[index1];
d_x[index2]=d_F[index2];
__syncthreads();
// Subtract the already-known terms from both rows of this level.
for (int k=0;k<n;k++)
{
if (k!=index1) d_x[index1]-=d_A[n*index1+k]*d_x[k];
if (k!=index2) d_x[index2]-=d_A[n*index2+k]*d_x[k];
}
__syncthreads();
d_x[index1]=d_x[index1]/d_A[n*index1+index1];
// BUG FIX: the original divided d_x[index1] here (copy-paste error), so
// every index2 unknown was assigned the wrong value.
d_x[index2]=d_x[index2]/d_A[n*index2+index2];
}
}
// Build a tridiagonal test system of size n = 2^p - 1, solve it on the GPU
// with cyclic reduction (FwdReduction + BackSub), and print the solution.
int main(int argc, char** argv)
{
// Declare variables
const int p=4;
const int n=pow(2,p)-1;
const int BYTES=n*sizeof(float);       // bytes in one length-n vector
// BUG FIX: the full matrix needs n*n floats. The original allocated and
// copied BYTES*BYTES = (n*sizeof(float))^2 bytes, which over-allocates the
// device buffer and, worse, copies far past the end of h_A.
const int A_BYTES=n*n*sizeof(float);   // bytes in the full n x n matrix
const float s=2.0f;
const float r=2.0f+s;
// Declare arrays
float* h_A = new float[n*n];
float* h_x = new float[n];
float* h_F = new float[n];
// Initialize: tridiagonal A with r on the diagonal, -1 off-diagonal,
// zero right-hand side except a small bump in the middle.
for (int i=0;i<n;i++)
{
h_x[i]=0.0f;
h_F[i]=0.0f;
for (int j=0;j<n;j++)
{
if (i-j==0) h_A[i*n+j]=r;
else if (abs(i-j)==1) h_A[i*n+j]=-1.0f;
else h_A[i*n+j]=0.0f;
}
}
h_A[0]=h_A[n*(n-1)+(n-1)]=r;
h_A[1]=h_A[n*(n-1)+(n-2)]=-1.0f;
h_F[(int)n/2]=0.2f;
h_F[(int)n/2-1]=0.1f;
h_F[(int)n/2+1]=0.1f;
// Declare GPU memory pointers
float* d_A;
float* d_x;
float* d_F;
// Allocate memory on device
cudaMalloc((void**)&d_A,A_BYTES);
cudaMalloc((void**)&d_x,BYTES);
cudaMalloc((void**)&d_F,BYTES);
// Transfer the arrays to the GPU (destination, source, size, direction)
cudaMemcpy(d_A,h_A,A_BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_x,h_x,BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_F,h_F,BYTES,cudaMemcpyHostToDevice);
// Kernel launches: one thread per reduction level, single block
FwdReduction<<<1,n>>>(d_A,d_F);
cudaDeviceSynchronize();
BackSub<<<1,n>>>(d_A,d_x,d_F);
// Copy results back to host
// BUG FIX: the original copied only BYTES (n floats) of the matrix back;
// copy the whole n x n matrix.
cudaMemcpy(h_A,d_A,A_BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(h_x,d_x,BYTES,cudaMemcpyDeviceToHost);
cudaMemcpy(h_F,d_F,BYTES,cudaMemcpyDeviceToHost);
for (int i=0;i<n;i++)
{
printf("%f ",h_x[i]);
printf("\n");
}
printf("\n");
// Free memory
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_F);
delete[] h_A;
delete[] h_x;
delete[] h_F;
}
|
9,688 | #include <cuda_runtime.h>
#include <device_launch_parameters.h> // threadIdx
#include <device_functions.h>
#include <cstdio>
static float* d_in;
static float* d_out;
static unsigned int h_Width;
static unsigned int h_Height;
static unsigned int h_BlockWidth;
static unsigned int h_BlockHeight;
#define THREAD_TOTAL_X_LEN 16
#define THREAD_AUX_X_LEN 2
#define THREAD_WORKING_X_LEN (THREAD_TOTAL_X_LEN - THREAD_AUX_X_LEN)
#define THREAD_TOTAL_Y_LEN 16
#define THREAD_AUX_Y_LEN 2
#define THREAD_WORKING_Y_LEN (THREAD_TOTAL_Y_LEN - THREAD_AUX_Y_LEN)
#define OFFSET(x,y) sIdx + y * THREAD_TOTAL_X_LEN + x
/*
 * Convert a height/bump map to a normal map using 3x3 Sobel filters.
 * Each block stages a THREAD_TOTAL_X_LEN x THREAD_TOTAL_Y_LEN tile of the
 * input in shared memory (the working area plus a 1-pixel apron on each
 * side); only the inner "working" threads compute and store an RGBA normal.
 * Launch requirement: dynamic shared memory of
 * THREAD_TOTAL_X_LEN * THREAD_TOTAL_Y_LEN * sizeof(float) bytes.
 */
__global__ void Sobel(const float* in,float* out, const unsigned int width, const unsigned int height)
{
extern __shared__ float s[];
// xPos/yPos are unsigned on purpose: for apron threads left of / above the
// image the subtraction wraps to a huge value, which the bounds test below
// rejects, so those lanes load 0 padding.
const unsigned int xPos = (blockIdx.x * THREAD_WORKING_X_LEN + threadIdx.x) - (THREAD_AUX_X_LEN / 2);
const unsigned int yPos = (blockIdx.y * THREAD_WORKING_Y_LEN + threadIdx.y) - (THREAD_AUX_Y_LEN / 2);
const unsigned int inPos = (xPos + yPos * width);
const unsigned int sIdx = (threadIdx.x + threadIdx.y * THREAD_TOTAL_X_LEN);
if (xPos < width && yPos < height)
s[sIdx] = in[inPos];
else
s[sIdx] = 0;
__syncthreads();
// BUG FIX: the y-axis test below used THREAD_AUX_X_LEN. Harmless while both
// aprons are 2 pixels, but wrong the moment the x and y aprons differ.
if ((threadIdx.x - (THREAD_AUX_X_LEN / 2)) < THREAD_WORKING_X_LEN && (threadIdx.y - (THREAD_AUX_Y_LEN / 2)) < THREAD_WORKING_Y_LEN)
{
const float sobelX = (
-1 * s[OFFSET(-1,-1)] +0 * s[OFFSET(0,-1)] +1 * s[OFFSET(1,-1)]
-2 * s[OFFSET(-1, 0)] +0 * s[OFFSET(0, 0)] +2 * s[OFFSET(1, 0)]
-1 * s[OFFSET(-1, 1)] +0 * s[OFFSET(0, 1)] +1 * s[OFFSET(1, 1)]
) * 0.25f;
const float sobelY = (
+1 * s[OFFSET(-1,-1)] +2 * s[OFFSET(0,-1)] +1 * s[OFFSET(1,-1)]
+0 * s[OFFSET(-1, 0)] +0 * s[OFFSET(0, 0)] +0 * s[OFFSET(1, 0)]
-1 * s[OFFSET(-1, 1)] -2 * s[OFFSET(0, 1)] -1 * s[OFFSET(1, 1)]
) * 0.25f;
// FIX: sqrtf instead of sqrt avoids a silent float->double round trip.
const float gradientLen = sqrtf(sobelX*sobelX + sobelY*sobelY + 1.0f);
// Pack the normalized normal into [0,1] for the RGBA output.
const float xLen = (-sobelX/gradientLen) * 0.5f + 0.5f;
const float yLen = (-sobelY/gradientLen) * 0.5f + 0.5f;
const float zLen = 1.0f/gradientLen;
out[inPos * 4 + 0] = xLen;
out[inPos * 4 + 1] = yLen;
out[inPos * 4 + 2] = zLen;
out[inPos * 4 + 3] = 1.0f;
}
}
/* Cache the image dimensions and allocate the device buffers: d_in holds one
 * float per pixel (the bump/height map), d_out one RGBA float quadruple per
 * pixel. The grid size is derived from the working (non-apron) area each
 * block produces. */
void InitBumpToNormalMap(const unsigned int width, const unsigned int height)
{
    h_Width = width;
    h_Height = height;
    h_BlockWidth = h_Width / THREAD_WORKING_X_LEN;
    h_BlockHeight = h_Height / THREAD_WORKING_Y_LEN;
    cudaMalloc(&d_in, sizeof(float) * 1 * h_Width * h_Height);
    cudaMalloc(&d_out, sizeof(float) * 4 * h_Width * h_Height);
}
/* Upload the height map, run the Sobel kernel, and download the RGBA normal
 * map. Returns cudaSuccess or the first failing cudaError_t (as int). */
int KernelBumpToNormalMap(float* h_in_img,float* h_out_img)
{
    const unsigned int pixels = h_Width * h_Height;
    cudaError_t status = cudaMemcpy(d_in, h_in_img, pixels * 1 * sizeof(float), cudaMemcpyHostToDevice);
    if (status != cudaSuccess)
        return status;
    const dim3 grid(h_BlockWidth, h_BlockHeight, 1);
    const dim3 block(THREAD_TOTAL_X_LEN, THREAD_TOTAL_Y_LEN, 1);
    const size_t smemBytes = THREAD_TOTAL_X_LEN * THREAD_TOTAL_Y_LEN * sizeof(float);
    Sobel<<<grid, block, smemBytes>>>(d_in, d_out, h_Width, h_Height);
    status = cudaGetLastError();   // catch launch-configuration errors
    if (status != cudaSuccess)
        return status;
    status = cudaMemcpy(h_out_img, d_out, pixels * 4 * sizeof(float), cudaMemcpyDeviceToHost);
    if (status != cudaSuccess)
        return status;
    return cudaSuccess;
}
/* Release the device buffers allocated by InitBumpToNormalMap. */
void ShutdownBumpToNormalMap()
{
    cudaFree(d_out);
    cudaFree(d_in);
}
|
9,689 | #include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#define TILE_SIZE 32
//#define MAX_L 16
/*
 * Compute P_HT = (C-localized sample covariance of e) * H^T / (L - 1),
 * tiled over the N dimension.
 *   e:                  N x L perturbation matrix (row major)
 *   C:                  compressed localization weights (2 x N layout,
 *                       indexed through the n-sized block structure below)
 *   H/H_indices/H_indptr: M x N sparse observation operator in CSR form
 *   P_HT:               N x M output (row major)
 *   n:                  localization block size (a parameter, see BUG FIX)
 * Launch: TILE_SIZE x TILE_SIZE blocks; grid covers M in x and N in y.
 */
__global__
void C_e_H_optim_compute_P_HT_kernel(double *e, double *C, double *H,
int *H_indices, int *H_indptr,
double *P_HT, int M, int N, int L, int n) {
__shared__ double P_tile[TILE_SIZE][TILE_SIZE];
int tile_row = threadIdx.y;
int tile_col = threadIdx.x;
int P_HT_row = blockIdx.y * blockDim.y + tile_row;
int P_HT_col = blockIdx.x * blockDim.x + tile_col;
// CSR extent of output column P_HT_col; the start pointer advances as
// tiles are consumed so each nonzero is visited exactly once.
int H_indptr_start = H_indptr[P_HT_col];
int H_indptr_end = H_indptr[P_HT_col + 1];
double P_HT_value = 0;
// BUG FIX: the original named this loop variable `n`, shadowing the
// block-size parameter `n`. Inside the loop, `P_col / n * n` and the C_idx
// formula then divided by the tile offset (which is 0 on the first
// iteration) instead of the block size. Renaming the loop variable lets
// the parameter be used where intended, while the CSR column offset below
// correctly uses the tile base.
for (int tile_base = 0; tile_base < N; tile_base += TILE_SIZE) {
int P_row = P_HT_row;
int P_col = tile_base + tile_col;
int e_row_idx = P_row * L;
int e_col_idx = P_col * L;
if (P_row < N && P_col < N) {
// Unnormalized sample covariance entry: e[P_row, :] . e[P_col, :]
double e_eT_value = 0;
for (int l = 0; l < L; l ++) {
e_eT_value += e[e_row_idx + l] * e[e_col_idx + l];
}
// Localization weight lookup in the compressed C array (block-wise
// distance between P_row and P_col, block size n).
int diff_n = P_col / n * n - P_row / n * n;
int C_idx = diff_n * 2 + n - 1 + P_col % n - P_row % n;
P_tile[tile_row][tile_col] = C[C_idx] * e_eT_value;
} else {
P_tile[tile_row][tile_col] = 0;
}
__syncthreads();
// Walk the CSR nonzeros of this output column that fall inside the
// current tile; H_indices is assumed sorted within each row.
for (int t = H_indptr_start; t < H_indptr_end; t ++) {
int H_tile_col = H_indices[t] - tile_base;
if (H_tile_col < TILE_SIZE) {
P_HT_value += P_tile[tile_row][H_tile_col] * H[t];
H_indptr_start += 1;
} else {
break;
}
}
// BUG FIX: without this barrier, the next iteration starts overwriting
// P_tile while other threads are still reading it above (the same race
// the author documented in MatMulConv2D). All threads execute the same
// number of outer-loop iterations, so the barrier is non-divergent.
__syncthreads();
}
if (P_HT_row < N && P_HT_col < M) {
// Normalize by (L - 1) to turn the outer-product sum into a covariance.
P_HT[P_HT_row * M + P_HT_col] = P_HT_value / (L - 1);
}
}
/*
__global__
void C_e_optim_compute_P_HT_kernel(double *e, double *C, double *H,
double *P_HT, int M, int N, int L) {
__shared__ double P_tile[TILE_SIZE][TILE_SIZE];
__shared__ double H_tile[TILE_SIZE][TILE_SIZE];
int tile_row = threadIdx.y;
int tile_col = threadIdx.x;
int P_HT_row = blockIdx.y * blockDim.y + tile_row;
int P_HT_col = blockIdx.x * blockDim.x + tile_col;
double P_HT_value = 0;
for (int n = 0; n < N; n += TILE_SIZE) {
int P_row = P_HT_row;
int P_col = n + tile_col;
int H_row = P_HT_col;
int H_col = n + tile_row;
int e_row_idx = P_row * L;
int e_col_idx = P_col * L;
if (P_row < N && P_col < N) {
double e_eT_value = 0;
for (int l = 0; l < L; l ++) {
e_eT_value += e[e_row_idx + l] * e[e_col_idx + l];
}
P_tile[tile_row][tile_col] = C[N - 1 + P_col - P_row] * e_eT_value;
} else {
P_tile[tile_row][tile_col] = 0;
}
__syncthreads();
if (H_row < M && H_col < N) {
H_tile[tile_row][tile_col] = H[H_row * N + H_col];
} else {
H_tile[tile_row][tile_col] = 0;
}
__syncthreads();
for (int t = 0; t < TILE_SIZE; t ++) {
P_HT_value += P_tile[tile_row][t] * H_tile[t][tile_col];
}
}
if (P_HT_row < N && P_HT_col < M) {
P_HT[P_HT_row * M + P_HT_col] = P_HT_value / (L - 1);
}
}
__global__
void C_optim_compute_P_HT_kernel(double *e, double *C, double *H,
double *P_HT, int M, int N, int L) {
__shared__ double P_tile[TILE_SIZE][TILE_SIZE];
__shared__ double H_tile[TILE_SIZE][TILE_SIZE];
__shared__ double e_row_tile[TILE_SIZE][MAX_L];
__shared__ double e_col_tile[TILE_SIZE][MAX_L];
int tile_row = threadIdx.y;
int tile_col = threadIdx.x;
int P_HT_row = blockIdx.y * blockDim.y + tile_row;
int P_HT_col = blockIdx.x * blockDim.x + tile_col;
double P_HT_value = 0;
for (int n = 0; n < N; n += TILE_SIZE) {
int P_row = P_HT_row;
int P_col = n + tile_col;
int H_row = P_HT_col;
int H_col = n + tile_row;
int e_row_idx = P_row * L;
for (int l = 0; l < L; l += TILE_SIZE) {
e_row_tile[tile_row][l + tile_col] = e[e_row_idx + l + tile_col];
}
int e_col_idx = P_col * L;
for (int l = 0; l < L; l += TILE_SIZE) {
e_col_tile[tile_col][l + tile_row] = e[e_col_idx + l + tile_row];
}
__syncthreads();
if (P_row < N && P_col < N) {
double e_eT_value = 0;
for (int l = 0; l < L; l ++) {
e_eT_value += e_row_tile[tile_row][l] * e_col_tile[tile_col][l];
}
P_tile[tile_row][tile_col] = C[N - 1 + P_col - P_row] * e_eT_value;
} else {
P_tile[tile_row][tile_col] = 0;
}
if (H_row < M && H_col < N) {
H_tile[tile_row][tile_col] = H[H_row * N + H_col];
} else {
H_tile[tile_row][tile_col] = 0;
}
__syncthreads();
for (int t = 0; t < TILE_SIZE; t ++) {
P_HT_value += P_tile[tile_row][t] * H_tile[t][tile_col];
}
}
if (P_HT_row < N && P_HT_col < M) {
P_HT[P_HT_row * M + P_HT_col] = P_HT_value / (L - 1);
}
}
__global__
void compute_P_kernel(double *e, double *C, double *P,
int L, int N) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
double e_elem = 0;
for (int l = 0; l < L; l ++) {
e_elem += e[col * L + l] * e[row * L + l];
}
if (col < N && row < N) {
P[row * N + col] = C[row * N + col] * e_elem;
}
}
__global__
void compute_P_HT_kernel(double *P, double *H, double *P_HT,
int M, int N, int L) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if (row < N && col < M) {
double Value = 0;
for (int i = 0; i < N; i ++) {
Value += P[row * N + i] * H[col * N + i];
}
P_HT[row * M + col] = Value / double(L - 1);
}
}
*/
/*
 * Read only the header of a binary sparse-matrix file and return its
 * non-zero count.  Header layout (four ints): sizeof(element), rows, cols,
 * nnz.  Asserts that the element type is double and that the stored
 * dimensions match dim1 x dim2.
 */
int load_sparse_matrix_nnz(const char *fname,
int dim1,
int dim2) {
FILE *fid;
int sizeof_elem;
int m, n, nnz;
int r;
printf("Loading %s into CPU memory...\n", fname);
// FIX: open in binary mode; text mode would corrupt reads on platforms
// that translate line endings.
fid = fopen(fname, "rb");
assert(fid);
// FIX: the original checked only the final fread's return value; a short
// or truncated file would silently yield garbage header fields.
r = fread(&sizeof_elem, sizeof(int), 1, fid);
assert(r == 1);
assert(sizeof_elem == sizeof(double));
r = fread(&m, sizeof(int), 1, fid);
assert(r == 1);
assert(m == dim1);
r = fread(&n, sizeof(int), 1, fid);
assert(r == 1);
assert(n == dim2);
r = fread(&nnz, sizeof(int), 1, fid);
assert(r == 1);
fclose(fid);
return nnz;
}
/*
 * Load a binary CSR sparse matrix into preallocated buffers.
 * File layout: four-int header (sizeof(element), rows, cols, nnz) followed
 * by nnz doubles (values), nnz ints (column indices) and rows+1 ints (row
 * pointers).  Asserts that the element type is double and the stored
 * dimensions equal dim1 x dim2; the caller must size `buffer`/`indices`
 * for nnz entries (see load_sparse_matrix_nnz) and `indptr` for dim1+1.
 */
void load_sparse_matrix(const char *fname,
double *buffer,
int *indices,
int *indptr,
int dim1,
int dim2) {
FILE *fid;
int sizeof_elem;
int m, n, nnz;
int r;
printf("Loading %s into CPU memory...\n", fname);
// FIX: binary mode, matching the binary payload.
fid = fopen(fname, "rb");
assert(fid);
// FIX: the original only checked the last header read; validate each one.
r = fread(&sizeof_elem, sizeof(int), 1, fid);
assert(r == 1);
assert(sizeof_elem == sizeof(double));
r = fread(&m, sizeof(int), 1, fid);
assert(r == 1);
assert(m == dim1);
r = fread(&n, sizeof(int), 1, fid);
assert(r == 1);
assert(n == dim2);
r = fread(&nnz, sizeof(int), 1, fid);
assert(r == 1);
r = fread(buffer, sizeof(double), nnz, fid);
assert(r == nnz);
r = fread(indices, sizeof(int), nnz, fid);
assert(r == nnz);
r = fread(indptr, sizeof(int), m + 1, fid);
assert(r == m + 1);
printf("Size of %s is: %d\n", fname, nnz);
fclose(fid);
}
/*
 * Load a dense dim1 x dim2 matrix of doubles from a raw binary file into
 * the preallocated `buffer`.  Asserts that the file size matches exactly
 * dim1*dim2 doubles.
 */
void load_matrix(const char *fname,
double *buffer,
int dim1,
int dim2) {
FILE *fid;
int r;
printf("Loading %s into CPU memory...\n", fname);
// FIX: binary mode; text mode could both mangle the payload and make
// ftell-based sizing wrong on some platforms.
fid = fopen(fname, "rb");
assert(fid);
// Determine the element count from the file size.
// FIX: the original ignored the fseek return values.
r = fseek(fid, 0L, SEEK_END);
assert(r == 0);
int matrix_size = ftell(fid) / sizeof(double);
assert(matrix_size == (dim1 * dim2));
r = fseek(fid, 0L, SEEK_SET);
assert(r == 0);
r = fread(buffer, sizeof(double), matrix_size, fid);
assert(r == matrix_size);
printf("Size of %s is: %d\n", fname, matrix_size);
fclose(fid);
}
/*
 * Write a dense dim1 x dim2 matrix of doubles to fname as raw binary
 * (no header), asserting the whole payload was written.
 */
void save_matrix(const char *fname,
double *buffer,
int dim1,
int dim2) {
    const int total = dim1 * dim2;
    printf("Saving %s into file...\n", fname);
    FILE *fid = fopen(fname, "w");
    assert(fid);
    const int written = fwrite(buffer, sizeof(double), total, fid);
    assert(written == total);
    printf("Size of %s is: %d\n", fname, total);
    fclose(fid);
}
/*
void do_compute_P_HT(const char *P_HT_fname,
const char *e_fname,
const char *H_fname,
const char *C_fname,
const char *N_c,
const char *L_c,
const char *M_c) {
int N = atoi(N_c);
int L = atoi(L_c);
int M = atoi(M_c);
double *e = (double *) malloc(N * L * sizeof(double));
double *H = (double *) malloc(M * N * sizeof(double));
double *C = (double *) malloc(N * N * sizeof(double));
double *P_HT = (double *) malloc(N * M * sizeof(double));
clock_t start, finish;
printf("0. Problem size: N=%d, L=%d, M=%d\n\n", N, L, M);
printf("1. Load data into CPU memory.\n");
load_matrix(e_fname, e, N, L);
load_matrix(H_fname, H, M, N);
load_matrix(C_fname, C, N, N);
printf("2. Allocate GPU memory.\n");
double *e_device;
double *H_device;
double *C_device;
double *P_device;
double *P_HT_device;
cudaMalloc((void **) &e_device, N * L * sizeof(double));
cudaMalloc((void **) &H_device, M * N * sizeof(double));
cudaMalloc((void **) &C_device, N * N * sizeof(double));
cudaMalloc((void **) &P_device, N * N * sizeof(double));
cudaMalloc((void **) &P_HT_device, N * M * sizeof(double));
printf("3. Write data into GPU memory.\n");
start = clock();
cudaMemcpy(e_device, e, N * L * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(H_device, H, M * N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(C_device, C, N * N * sizeof(double), cudaMemcpyHostToDevice);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("4. Call GPU cuda kernel.\n");
start = clock();
dim3 DimGrid;
dim3 DimBlock;
DimGrid = dim3(ceil(N / float(TILE_SIZE)), ceil(N / float(TILE_SIZE)), 1);
DimBlock = dim3(TILE_SIZE, TILE_SIZE, 1);
compute_P_kernel<<<DimGrid, DimBlock>>>(e_device, C_device, P_device, L, N);
cudaDeviceSynchronize();
DimGrid = dim3(ceil(M / float(TILE_SIZE)), ceil(N / float(TILE_SIZE)), 1);
DimBlock = dim3(TILE_SIZE, TILE_SIZE, 1);
compute_P_HT_kernel<<<DimGrid, DimBlock>>>(P_device, H_device, P_HT_device, M, N, L);
cudaDeviceSynchronize();
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("5. Read results from GPU memory.\n");
start = clock();
cudaMemcpy(P_HT, P_HT_device, N * M * sizeof(double), cudaMemcpyDeviceToHost);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("6. Save results to file.\n");
save_matrix(P_HT_fname, P_HT, N, M);
printf("7. De-allocate CPU and GPU memory.\n");
cudaFree(e_device);
cudaFree(H_device);
cudaFree(C_device);
cudaFree(P_device);
cudaFree(P_HT_device);
free(e);
free(H);
free(C);
free(P_HT);
}
void do_optim_compute_P_HT(const char *P_HT_fname,
const char *e_fname,
const char *H_fname,
const char *C_fname,
const char *N_c,
const char *L_c,
const char *M_c) {
int N = atoi(N_c);
int L = atoi(L_c);
int M = atoi(M_c);
double *e = (double *) malloc(N * L * sizeof(double));
double *H = (double *) malloc(M * N * sizeof(double));
double *C = (double *) malloc(2 * N * sizeof(double));
double *P_HT = (double *) malloc(N * M * sizeof(double));
clock_t start, finish;
printf("0. Problem size: N=%d, L=%d, M=%d\n\n", N, L, M);
printf("1. Load data into CPU memory.\n");
load_matrix(e_fname, e, N, L);
load_matrix(H_fname, H, M, N);
load_matrix(C_fname, C, 2, N);
printf("2. Allocate GPU memory.\n");
double *e_device;
double *H_device;
double *C_device;
double *P_HT_device;
cudaMalloc((void **) &e_device, N * L * sizeof(double));
cudaMalloc((void **) &H_device, M * N * sizeof(double));
cudaMalloc((void **) &C_device, 2 * N * sizeof(double));
cudaMalloc((void **) &P_HT_device, N * M * sizeof(double));
printf("3. Write data into GPU memory.\n");
start = clock();
cudaMemcpy(e_device, e, N * L * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(H_device, H, M * N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(C_device, C, 2 * N * sizeof(double), cudaMemcpyHostToDevice);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("4. Call GPU cuda kernel.\n");
start = clock();
dim3 DimGrid;
dim3 DimBlock;
DimGrid = dim3(ceil(N / float(TILE_SIZE)), ceil(N / float(TILE_SIZE)), 1);
DimBlock = dim3(TILE_SIZE, TILE_SIZE, 1);
C_e_optim_compute_P_HT_kernel<<<DimGrid, DimBlock>>>(e_device, C_device, H_device, P_HT_device, M, N, L);
cudaDeviceSynchronize();
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("5. Read results from GPU memory.\n");
start = clock();
cudaMemcpy(P_HT, P_HT_device, N * M * sizeof(double), cudaMemcpyDeviceToHost);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("6. Save results to file.\n");
save_matrix(P_HT_fname, P_HT, N, M);
printf("7. De-allocate CPU and GPU memory.\n");
cudaFree(e_device);
cudaFree(H_device);
cudaFree(C_device);
cudaFree(P_HT_device);
free(e);
free(H);
free(C);
free(P_HT);
}
*/
/*
 * End-to-end driver: load e (N x L), the sparse CSR H (M x N) and the
 * compressed localization weights C (2 x N) from the given files, run
 * C_e_H_optim_compute_P_HT_kernel on the GPU, and save the N x M result
 * to P_HT_fname.  The N_c/L_c/M_c/n_c arguments are the dimensions as
 * decimal strings (typically straight from argv).
 * Phase timings are measured with clock(), i.e. CPU processor time, not
 * GPU time.
 * NOTE(review): return codes of cudaMalloc/cudaMemcpy and the kernel
 * launch are never checked -- a GPU failure would only show up as garbage
 * in the saved output.
 */
void do_C_e_H_optim_compute_P_HT(const char *P_HT_fname,
const char *e_fname,
const char *H_fname,
const char *C_fname,
const char *N_c,
const char *L_c,
const char *M_c,
const char *n_c) {
int N = atoi(N_c);
int L = atoi(L_c);
int M = atoi(M_c);
int n = atoi(n_c);
double *e = (double *) malloc(N * L * sizeof(double));
double *C = (double *) malloc(2 * N * sizeof(double));
double *P_HT = (double *) malloc(N * M * sizeof(double));
clock_t start, finish;
printf("0. Problem size: N=%d, L=%d, M=%d\n\n", N, L, M);
printf("1. Load data into CPU memory.\n");
// The nnz count is read first so the CSR buffers can be sized exactly.
int H_nnz = load_sparse_matrix_nnz(H_fname, M, N);
double *H = (double *) malloc(H_nnz * sizeof(double));
int *H_indices = (int *) malloc(H_nnz * sizeof(int));
int *H_indptr = (int *) malloc((M + 1) * sizeof(int));
load_matrix(e_fname, e, N, L);
load_sparse_matrix(H_fname, H, H_indices, H_indptr, M, N);
load_matrix(C_fname, C, 2, N);
printf("2. Allocate GPU memory.\n");
double *e_device;
double *H_device;
int *H_indices_device;
int *H_indptr_device;
double *C_device;
double *P_HT_device;
cudaMalloc((void **) &e_device, N * L * sizeof(double));
cudaMalloc((void **) &H_device, H_nnz * sizeof(double));
cudaMalloc((void **) &H_indices_device, H_nnz * sizeof(int));
cudaMalloc((void **) &H_indptr_device, (M + 1) * sizeof(int));
cudaMalloc((void **) &C_device, 2 * N * sizeof(double));
cudaMalloc((void **) &P_HT_device, N * M * sizeof(double));
printf("3. Write data into GPU memory.\n");
start = clock();
cudaMemcpy(e_device, e, N * L * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(H_device, H, H_nnz * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(H_indices_device, H_indices, H_nnz * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(H_indptr_device, H_indptr, (M + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(C_device, C, 2 * N * sizeof(double), cudaMemcpyHostToDevice);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("4. Call GPU cuda kernel.\n");
start = clock();
// Grid covers M output columns in x and N output rows in y, one
// TILE_SIZE x TILE_SIZE block per output tile.
dim3 DimGrid;
dim3 DimBlock;
DimGrid = dim3(ceil(M / float(TILE_SIZE)), ceil(N / float(TILE_SIZE)), 1);
DimBlock = dim3(TILE_SIZE, TILE_SIZE, 1);
C_e_H_optim_compute_P_HT_kernel<<<DimGrid, DimBlock>>>(e_device, C_device, H_device, H_indices_device, H_indptr_device, P_HT_device, M, N, L, n);
cudaDeviceSynchronize();
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("5. Read results from GPU memory.\n");
start = clock();
cudaMemcpy(P_HT, P_HT_device, N * M * sizeof(double), cudaMemcpyDeviceToHost);
finish = clock();
printf("Latency: %fms\n", (double)(finish - start) * 1000 / CLOCKS_PER_SEC);
printf("6. Save results to file.\n");
save_matrix(P_HT_fname, P_HT, N, M);
printf("7. De-allocate CPU and GPU memory.\n");
cudaFree(e_device);
cudaFree(H_device);
cudaFree(H_indices_device);
cudaFree(H_indptr_device);
cudaFree(C_device);
cudaFree(P_HT_device);
free(e);
free(H);
free(H_indices);
free(H_indptr);
free(C);
free(P_HT);
}
// Entry point: validates the command line and dispatches to the optimized
// P*H^T computation.
int main(int argc, char **argv) {
// BUG FIX: the argc check was commented out while argv[1..8] are
// dereferenced unconditionally -- running with fewer arguments read past
// the argv array. Require all 8 positional arguments.
if (argc != 9) {
fprintf(stderr, "Usage %s <P_HT_fname> <e_fname> <H_fname> <C_fname> <N> <L> <M> <n>\n", argv[0]);
return 1;
}
do_C_e_H_optim_compute_P_HT(argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
return 0;
}
|
9,690 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define REPEAT 1
/* Element-wise d_odata[i] = d_idata[i] * exp(d_jdata[i]) using the fast-math
 * __expf intrinsic; REPEAT simply re-executes the same store (a knob for
 * timing experiments). Threads past `size` do nothing. */
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size)
        return;
    for (int rep = 0; rep < REPEAT; ++rep) {
        d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
    }
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
void getChunkInfo(int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams);
#define NSIZE 2097152
#define CHUNKSIZEMAX 65536
#define NUMSTREAMS 8
// Exercise skeleton: process NSIZE elements in chunks spread over
// NUMSTREAMS CUDA streams, overlapping H2D copies, kernel work and D2H
// copies, then compare against a CPU reference.
// NOTE(review): the "insert CUDA code" sections are intentionally left
// blank for the student. As written, d_a/d_b/d_c are never allocated,
// streams[] are never created, and h_c is never written -- so the
// cudaFree/cudaStreamDestroy calls at the end operate on uninitialized
// handles and the h_c[0] printout is meaningless until the TODOs are
// filled in.
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
cudaEvent_t start, end;
float eventEtime;
int chunk_size_max = CHUNKSIZEMAX;
int num_streams = NUMSTREAMS;
int num_chunk;
int i;
int h_offset, d_offset;
int chunk_size, chunk_stream;
cudaStream_t streams[NUMSTREAMS];
// chunk number calculation (ceiling division)
num_chunk = (nsize-1) / chunk_size_max + 1;
printf("Number of elements: %d\n", nsize);
printf("Number of streams: %d\n", num_streams);
printf("Number of chunks: %d\n", num_chunk);
// allocation and initialization of host buffers
// (pinned memory -- required for真 asynchronous cudaMemcpyAsync)
cudaMallocHost((void**)&h_a, nsize * sizeof(float));
cudaMallocHost((void**)&h_b, nsize * sizeof(float));
cudaMallocHost((void**)&h_c, nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
//-- insert CUDA code ----------------
// device buffers allocation
// streams creation
//------------------------------------
// creation of cuda events: start, end
cudaEventCreate(&start);
cudaEventCreate(&end);
printf ("\nGPU computation ... ");
cudaEventRecord(start,0);
for (i = 0; i < num_chunk; i++) {
// please see getChunkInfo function description
getChunkInfo(i, &d_offset, &chunk_size, &h_offset, &chunk_stream, nsize, chunk_size_max, num_chunk, num_streams);
//-- insert CUDA code ----------------
// host to device buffer copies
//------------------------------------
// block number calculation (ceiling division over this chunk)
nBlocks = (chunk_size-1) / nThreads + 1;
//-- insert CUDA code ----------------
// arrayFunc kernel launch
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
//------------------------------------
}
cudaDeviceSynchronize();
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation (reference result, timed with the same CUDA events)
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
cudaMallocHost((void**)&cpuResult, nsize * sizeof(float));
cudaEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
for (i = 0; i< num_streams; i++)
cudaStreamDestroy(streams[i]);
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// free resources on host
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
// Fill the first `size` entries of `array` with pseudo-random values in
// [0, alpha], drawn from the C library rand() stream (whatever its current
// seed state is).
void
initArrayData(float * array, float alpha, int size)
{
    for (int k = 0; k < size; ++k)
        array[k] = alpha * (float) rand() / (float) RAND_MAX;
}
// getChunkInfo is used to compute some useful information starting
// from the i-th chunk, the total number of used chunks,
// the maximum chunk size and the array size to process
// getChunkInfo returns:
// * chunk_size: the number of elements to use in current chunk
// * chunk_stream: the stream to use to process i-th chunk
// * the X_offsets to use for accessing the correct elements of host
// and device arrays in data movements and kernel launch
//
// For chunk i out of num_chunk, compute the host offset, the device offset,
// the chunk length and the stream that processes it. The device offset is
// per-stream (each stream owns a chunk_size_max slot); the final chunk is
// shortened when nSize is not a multiple of chunk_size_max.
void getChunkInfo(int i, int *d_offset, int *chunk_size, int *h_offset, int *chunk_stream, int nSize, int chunk_size_max, int num_chunk, int num_streams){
    const int tail = nSize % chunk_size_max;          // size of a partial last chunk (0 if exact)
    const int last = (i == num_chunk - 1);
    *h_offset     = i * chunk_size_max;
    *chunk_stream = i % num_streams;                  // round-robin stream assignment
    *d_offset     = (*chunk_stream) * chunk_size_max;
    *chunk_size   = (last && tail != 0) ? tail : chunk_size_max;
}
// CPU reference for the arrayFunc kernel: h_odata[i] = h_idata[i] * exp(h_jdata[i]).
// The REPEAT loop mirrors the GPU benchmark knob and simply re-does the store.
#ifndef REPEAT
#define REPEAT 1
#endif
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
    for (int i = 0; i < size; ++i)
        for (int r = 0; r < REPEAT; ++r)
            h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
|
9,691 | #include "includes.h"
// Initialise a 1-D array of n cells plus `ghosts` ghost cells on each side
// with a square pulse: 1.5 in the middle third of the domain, 0.5 elsewhere.
// Each thread strides through the domain by blockDim.x; thread 0 then fills
// the periodic ghost cells.
// NOTE(review): x and totalX are never used — confirm they are vestigial.
// NOTE(review): __syncthreads() only synchronises within a block, and the
// stride is blockDim.x (not gridDim.x*blockDim.x), so this kernel looks
// written for a single-block launch; a multi-block launch would both
// duplicate work and race on the ghost-cell copy — verify launch config.
__global__ void initSquare(float* a, float* x, float totalX, int n, int ghosts){
int i = threadIdx.x + blockDim.x*blockIdx.x;
for(int j = 0; blockDim.x*j + i < n; j++){
int index = j*blockDim.x+i;
// middle third of the interior gets the high value
if(index > n/3 && index < 2*n/3)
a[index+ghosts] = 1.5;
else a[index+ghosts] = .5;
}
__syncthreads();
if(i==0){ //copy over for boundary conditions
// periodic wrap: left ghosts mirror the right end, right ghosts the left end
for(int j = 0; j < ghosts; j++){
a[j] = a[j+n];
a[n+ghosts+j] = a[ghosts+j];
// a[j] = a[ghosts];
// a[n+ghosts+j] = a[n+ghosts-1];
}
// for(int z = 0; z < n+2*ghosts; z++){
// printf("%5d %10f\n", z, a[z]);
// }
}
}
9,692 | #include "includes.h"
/* This code will generate a fractal image. Uses OpenCV, to compile:
nvcc CudaFinal.cu `pkg-config --cflags --libs opencv` */
typedef enum color {BLUE, GREEN, RED} Color;
// Convert one HSV pixel per thread to 8-bit RGB using the standard sector
// algorithm (h in degrees, s and v in [0,1]); one block per image row, one
// thread per column, so the caller must launch exactly `width` threads and
// `heigth` blocks — there is no bounds check.
// The Color enum values are reused as channel byte offsets: H is stored at
// the RED offset, S at GREEN, V at BLUE.
__global__ void convert_to_rgb(float *hsv, unsigned char *dest, int width, int heigth, int step, int channels) {
float r, g, b;
float h, s, v;
int ren,col;
ren = blockIdx.x;
col = threadIdx.x;
h = hsv[(ren * step) + (col * channels) + RED];
s = hsv[(ren * step) + (col * channels) + GREEN];
v = hsv[(ren * step) + (col * channels) + BLUE];
// sector index hi = floor(h/60) selects which of the six hue wedges we are in;
// f is the fractional position inside the wedge
float f = h/60.0f;
float hi = floorf(f);
f = f - hi;
float p = v * (1 - s);
float q = v * (1 - s * f);
float t = v * (1 - s * (1 - f));
// hi == 6.0f folds h == 360 degrees back onto the first (red) sector
if(hi == 0.0f || hi == 6.0f) {
r = v;
g = t;
b = p;
} else if(hi == 1.0f) {
r = q;
g = v;
b = p;
} else if(hi == 2.0f) {
r = p;
g = v;
b = t;
} else if(hi == 3.0f) {
r = p;
g = q;
b = v;
} else if(hi == 4.0f) {
r = t;
g = p;
b = v;
} else {
r = v;
g = p;
b = q;
}
// scale to [0,255] with round-to-nearest and store interleaved BGR/RGB
dest[(ren * step) + (col * channels) + RED] = (unsigned char) __float2uint_rn(255.0f * r);
dest[(ren * step) + (col * channels) + GREEN] = (unsigned char) __float2uint_rn(255.0f * g);
dest[(ren * step) + (col * channels) + BLUE] = (unsigned char) __float2uint_rn(255.0f * b);
}
9,693 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <time.h>
#define SIZE 1000
#define BLKS 4
#define THREADSPBLKS 256
// One Jacobi heat-diffusion step on a width x width grid, one thread per
// cell: boundary cells keep their value (Dirichlet), interior cells take the
// mean of their four neighbours; the result is then copied back into g_d.
// The `itr` parameter is unused (kept for call-site compatibility).
// FIX: the host launches 491 blocks x 512 threads = 251,392 threads for
// 251,001 cells, and the original kernel had no bounds check, so the tail
// threads read and wrote past the end of g_d/h_d. Out-of-range threads are
// now masked with `active` (rather than returning early) so every thread in
// a block still reaches both __syncthreads() barriers.
// NOTE(review): __syncthreads() does not synchronise across blocks, so the
// trailing g_d[i] = h_d[i] copy can still race with neighbour reads from
// other blocks between launches of the outer host loop — consider
// ping-ponging the two buffers on the host instead.
__global__
void heatCalcKernel(float * g_d,float * h_d, int width, int itr)
{
    int i = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    bool active = (i < width * width);   // guard the grid tail
    if (active) {
        int row = i / width;
        int col = i % width;
        int left = i - 1;
        int right = i + 1;
        int top = ((row - 1) * width) + col;
        int bottom = ((row + 1) * width) + col;
        if ((col == 0) || (col == (width - 1)) || (i < width) || (i >= (width * (width - 1)))) {
            h_d[i] = g_d[i];                 // fixed boundary value
        } else {
            h_d[i] = 0.25f * (g_d[top] + g_d[left] + g_d[bottom] + g_d[right]);
        }
    }
    __syncthreads();
    if (active) g_d[i] = h_d[i];
    __syncthreads();
}
// Host driver: set up a 501x501 temperature grid (hot strip in the first
// row, 80-degree boundary, cold interior), run `itr` Jacobi steps on the
// GPU and time the whole thing with clock().
// NOTE(review): inhost/outhost are ~1 MB stack VLAs each — fine on typical
// Linux stacks but worth moving to the heap.
// NOTE(review): the launch is 491 blocks x (16x32) = 251,392 threads for
// 251,001 cells; the kernel must bounds-check or the tail threads run off
// the arrays.
void heatCalc()
{
clock_t tic;
clock_t toc;
tic = clock();
int width = 501; //32
int itr = 50;
int len = width * width;
float inhost[len];
float outhost[len];
int j;
float * g_d;
float * h_d;
int counter = 0;
/*----------------------------------------------------------------*/
// report which device we run on, and fail hard if none is usable
cudaError_t error;
cudaDeviceProp dev;
error = cudaGetDeviceProperties(&dev, 0);
if(error != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(error));
exit(-1);
}
printf("\nDevice %d:\n", 0);
printf("name: %s\n",dev.name);
cudaSetDevice(0);
/*--------------------------------------------------------------*/
// initial condition: cells 10..30 are a 150-degree hot spot, the grid
// border is held at 80, everything else starts cold
for( j = 0; j < len; j++){
if((j >= 10) && (j <= 30)){
inhost[j] = 150;
}else if((j < width) || ((j % width) == 0) || ((j % width) == (width - 1)) || (j >= (width * (width - 1)))){
inhost[j] = 80;
}else{
inhost[j] = 0;
}
}
for( j = 0; j < len; j++){
outhost[j] = 0;
}
printf("---------\n");
cudaMalloc((void**)&g_d, len*sizeof(float));
//intialize the matrix
cudaMemcpy(g_d,inhost,len*sizeof(float),cudaMemcpyHostToDevice);
cudaMalloc((void**)&h_d, len*sizeof(float));
dim3 dimGrid(491);
dim3 dimBlock(16,32);
// kernel invocation: one launch per Jacobi iteration; each launch both
// computes h_d and copies it back into g_d for the next step
for(counter = 0; counter < itr; counter++){
heatCalcKernel<<<dimGrid,dimBlock>>>(g_d,h_d,width,itr);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
//transfer C_d from device to host (cudaMemcpy implicitly waits for the kernels)
cudaMemcpy(outhost, h_d, (len*sizeof(float)), cudaMemcpyDeviceToHost);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaFree(g_d);
cudaFree(h_d);
for( j = 0; j < len; j++){
inhost[j] = outhost[j];
}
toc = clock();
double time_taken_parallel = (double)(toc -tic)/CLOCKS_PER_SEC; // in seconds
printf("time taken: %f\n", time_taken_parallel);
}
// Entry point: run the GPU heat-diffusion demo once and exit.
int main()
{
heatCalc();
return 0;
}
|
9,694 | #ifndef IC_mod
#define IC_mod
#pragma once
// G.K.H. Lee - PA Noti
// translation from from Fortran to C++ by PA Noti - Feb 2021
// last changes in Fortran by GKH-Lee - Oct 2020
#pragma once
#include <cuda_runtime.h>
#include <string>
#include <iostream>
//#include "FMS_RC_para_and_const.h"
//#include "k_Rosseland_mod.h"
//#include <math.h>
// Calculates the IR band Rosseland mean opacity (local T) according to the
// Freedman et al. (2014) fit and coefficents
// Rosseland-mean IR-band opacity from the Freedman et al. (2014) table fit.
// Input:  Tin [K], Pin [Pa], met = log10 metallicity relative to solar.
// Output (by reference): k_IR [m2 kg-1], capped at 1e10 because the fit
// diverges for extreme values.
void k_Ross_Freedman(double Tin, double Pin, double met, double& k_IR) {
    const double pi = atan((double)(1)) * 4;
    const double onedivpi = 1.0 / pi;
    // Coefficient parameters for the Freedman et al. (2014) table fit
    // (_l / _h pairs are the low- and high-temperature variants)
    const double c1 = 10.602;
    const double c2 = 2.882;
    const double c3 = 6.09e-15;
    const double c4 = 2.954;
    const double c5 = -2.526;
    const double c6 = 0.843;
    const double c7 = -5.490;
    const double c8_l = -14.051, c8_h = 82.241;
    const double c9_l = 3.055, c9_h = -55.456;
    const double c10_l = 0.024, c10_h = 8.754;
    const double c11_l = 1.877, c11_h = 0.7048;
    const double c12_l = -0.445, c12_h = -0.0414;
    const double c13_l = 0.8321, c13_h = 0.8321;
    // work in dyne cm-2 and log10 space
    const double T = Tin;
    const double P = Pin * ((double)10.0);
    const double Tl10 = log10(T);
    const double Pl10 = log10(P);
    // low-pressure branch of the fit, then de-logged
    double k_lowP = c1 * atan(Tl10 - c2) -
        (c3 / (Pl10 + c4)) * exp(pow((Tl10 - c5), 2.0)) + c6 * met + c7;
    k_lowP = pow(((double)10.0), k_lowP);
    // high-pressure branch: the coefficient set switches at T = 800 K
    double k_hiP;
    if (T <= 800.0)
    {
        k_hiP = c8_l + c9_l * Tl10 + c10_l * pow(Tl10, 2.0) +
            Pl10 * (c11_l + c12_l * Tl10) +
            c13_l * met * (0.5 + onedivpi * atan((Tl10 - ((double)2.5)) / (double)0.2));
    }
    else
    {
        k_hiP = c8_h + c9_h * Tl10 +
            c10_h * pow(Tl10, 2.0) + Pl10 * (c11_h + c12_h * Tl10) +
            c13_h * met * (0.5 + onedivpi * atan((Tl10 - ((double)2.5)) / (double)0.2));
    }
    k_hiP = pow(((double)10.0), k_hiP);
    // total Rosseland mean opacity, converted back to m2 kg-1
    k_IR = (k_lowP + k_hiP) / ((double)10.0);
    // avoid divergence in the fit for large values
    if (k_IR > 1.0e10)
    {
        k_IR = 1.0e10;
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates 3 band grey visual gamma values and 2 picket fence IR gamma values
// according to the coefficents and equations in:
// Parmentier & Menou (2014) and Parmentier et al. (2015)
// NOTE: This does not calculate the opacity - call k_Ross_Freedman for that
// Picket-fence band parameters from the Parmentier et al. (2015) fits.
// Input:  Teff [K]; table_num selects the coefficient table (1 = with
//         TiO/VO, 2 = without).
// Output (by reference): gam_V[3], Beta_V[3] (visual bands), Beta[2]
//         (IR bandwidths), gam_1/gam_2 (IR band ratios), gam_P (Planck/
//         Rosseland ratio) and tau_lim.
// NOTE(review): the first five Teff ranges of table 2 duplicate table 1
// exactly — matches the published appendix, but worth double-checking.
// NOTE(review): a table_num outside {1,2} leaves all coefficients at 0,
// silently producing gam_V = 10^0 = 1; no error is raised.
void gam_Parmentier(double Teff, int table_num, double(&gam_V)[3], double(&Beta_V)[3],
double(&Beta)[2], double& gam_1, double& gam_2, double& gam_P, double& tau_lim) {
// dependcies
//// pow from math
//// log10 from math
// Input:
// Teff - Effective temperature [K] (See Parmentier papers for various ways to calculate this)
// for non-irradiated atmosphere Teff = Tint
// table_num - Table selection from Parmentier et al. (2015): 1 = w. TiO/VO, 2 = w.o. TiO/VO
// Call by reference (Input&Output):
// gam_V(3) - gamma ratio for 3 visual bands (gam_V = kV_Ross/kIR_Ross)
// beta_V(3) - fraction of total incident stellar flux in band (1/3 for Parmentier values)
// Beta - equilvalent bandwidth for picket fence IR model
// gam_1 - gamma ratio for IR band 1 (gam_1 = kIR_1/kIR_Ross)
// gam_2 - gamma ratio for IR band 2 (gam_2 = kIR_2/kIR_Ross)
// gam_P - gamma ratio for Planck mean (gam_P = kIR_Planck/kIR_Ross)
// tau_lim - tau limit variable (usually for IC system)
// work variables
double R = 0;
double aP = 0;
double bP = 0;
double cP = 0;
double aV1 = 0, bV1 = 0, aV2 = 0, bV2 = 0, aV3 = 0, bV3 = 0;
double aB = 0, bB = 0;
double l10T = 0, l10T2 = 0, RT = 0;
int i;
// start operations
// Log 10 T_eff variables
l10T = log10(Teff);
l10T2 = pow(l10T, 2.0);
if (table_num == 1) {
// First table in Parmentier et al. (2015) w. TiO/VO
// Start large if statements with visual band and Beta coefficents
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -23.75; bV1 = 7.76;
aV2 = -19.95; bV2 = 6.34;
aV3 = -3.65; bV3 = 0.89;
aB = 0.84; bB = 0.0;
}
else if (Teff >= 2000.0)
{
aV1 = 12.65; bV1 = -3.27;
aV2 = 13.56; bV2 = -3.81;
aV3 = -6.02; bV3 = 1.61;
aB = 6.21; bB = -1.63;
}
// gam_P coefficents
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else if (table_num == 2)
{
// ! Appendix table from Parmentier et al. (2015) - without TiO and VO
if (Teff <= 200.0)
{
aV1 = -5.51; bV1 = 2.48;
aV2 = -7.37; bV2 = 2.53;
aV3 = -3.03; bV3 = -0.20;
aB = 0.84; bB = 0.0;
}
else if (Teff > 200.0 && Teff <= 300.0)
{
aV1 = 1.23; bV1 = -0.45;
aV2 = 13.99; bV2 = -6.75;
aV3 = -13.87; bV3 = 4.51;
aB = 0.84; bB = 0.0;
}
else if (Teff > 300.0 && Teff <= 600.0)
{
aV1 = 8.65; bV1 = -3.45;
aV2 = -15.18; bV2 = 5.02;
aV3 = -11.95; bV3 = 3.74;
aB = 0.84; bB = 0.0;
}
else if (Teff > 600.0 && Teff <= 1400.0)
{
aV1 = -12.96; bV1 = 4.33;
aV2 = -10.41; bV2 = 3.31;
aV3 = -6.97; bV3 = 1.94;
aB = 0.84; bB = 0.0;
}
else if (Teff > 1400.0 && Teff < 2000.0)
{
aV1 = -1.68; bV1 = 0.75;
aV2 = 6.96; bV2 = -2.21;
aV3 = 0.02; bV3 = -0.28;
aB = 3.0; bB = -0.69;
}
else if (Teff >= 2000.0)
{
aV1 = 10.37; bV1 = -2.91;
aV2 = -2.4; bV2 = 0.62;
aV3 = -16.54; bV3 = 4.74;
aB = 3.0; bB = -0.69;
}
// gam_P coefficents
if (Teff <= 1400.0)
{
aP = -2.36;
bP = 13.92;
cP = -19.38;
}
else
{
aP = -12.45;
bP = 82.25;
cP = -134.42;
}
}
// Calculation of all values
// Visual band gamma
gam_V[0] = pow(((double)10.0), (aV1 + bV1 * l10T));
gam_V[1] = pow(((double)10.0), (aV2 + bV2 * l10T));
gam_V[2] = pow(((double)10.0), (aV3 + bV3 * l10T));
// Visual band fractions
for (i = 0; i < 3; i++)
{
Beta_V[i] = ((double)1.0) / ((double)3.0);
}
// gamma_Planck - if < 1 then make it grey approximation (k_Planck = k_Ross, gam_P = 1)
gam_P = pow(((double)10.0), (aP * l10T2 + bP * l10T + cP));
if (gam_P < 1.0000001)
{
gam_P = 1.0000001;
}
// equivalent bandwidth value
Beta[0] = aB + bB * l10T;
Beta[1] = (1.0) - Beta[0];
// IR band kappa1/kappa2 ratio - Eq. 96 from Parmentier & Menou (2014)
RT = (gam_P - 1.0) / (2.0 * Beta[0] * Beta[1]);
R = 1.0 + RT + sqrt(pow(RT, 2.0) + RT);
// gam_1 and gam_2 values - Eq. 92, 93 from Parmentier & Menou (2014)
gam_1 = Beta[0] + R - Beta[0] * R;
gam_2 = gam_1 / R;
// Calculate tau_lim parameter
tau_lim = ((double)1.0) / (gam_1 * gam_2) * sqrt(gam_P / ((double)3.0));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Calculates the Bond Albedo according to Parmentier et al. (2015) expression
// Bond albedo from the Parmentier et al. (2015) fit: AB = 10^(a + b*log10(Teff0)),
// where the a/b coefficients depend on the zero-albedo effective temperature
// Teff0 [K] and the surface gravity grav [m s-2].
// Output is returned by reference in AB.
void Bond_Parmentier(double Teff0, double grav, double& AB) {
    double a, b;
    // select the coefficient pair for this temperature regime
    if (Teff0 <= 250.0)
    {
        a = ((double)-0.335) * pow(grav, ((double)0.070));
        b = 0.0;
    }
    else if (Teff0 <= 750.0)
    {
        a = -0.335 * pow(grav, ((double)0.070)) + 2.149 * pow(grav, ((double)0.135));
        b = -0.896 * pow(grav, ((double)0.135));
    }
    else if (Teff0 < 1250.0)
    {
        a = -0.335 * pow(grav, ((double)0.070)) - 0.428 * pow(grav, ((double)0.135));
        b = 0.0;
    }
    else
    {
        a = 16.947 - ((double)3.174) * pow(grav, ((double)0.070)) - 4.051 *
            pow(grav, ((double)0.135));
        b = -5.472 + ((double)0.917) * pow(grav, ((double)0.070)) + 1.170 *
            pow(grav, ((double)0.135));
    }
    // de-log the linear fit in log10(Teff0)
    AB = pow(((double)10.0), (a + b * log10(Teff0)));
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// This subroutine follows Parmentier & Guillot (2014, 2015) non-grey picket fence scheme
// Non-grey picket-fence initial T-p profile following Parmentier & Guillot
// (2014, 2015).
// Input:  nlay layers; pl/pe layer/level pressures; Tint internal and Tirr
//         irradiation temperatures [K]; mu = cos(zenith angle); grav surface
//         gravity; table_num/met forwarded to the opacity/band fits.
// Output (by reference): Tl layer temperatures, tau optical depths, kRoss
//         Rosseland opacities. NOTE(review): pe is currently unused here
//         (the Valencia opacity calls that would use it are commented out).
// FIX: in the first estimate of layer i the band sum was written as
//      "summy = +3.0*..." (plain assignment with unary plus), which threw
//      away the contribution of visual bands 0 and 1; it now accumulates
//      with "+=" exactly like the two other, otherwise identical band loops.
void Parmentier_IC(const int nlay, double* pl,
    double* pe, double Tint, double mu, double Tirr,
    double grav, double* (&Tl), int table_num, double met,
    double* tau, double* kRoss) {
    // work variables
    int i, j, k;
    double Teff0, Teff, Tmu, Bond, Tskin;
    double gam_V[3] = { 0 }, Beta_V[3] = { 0 };
    double Beta[2];
    double gam_1, gam_2, gam_P, tau_lim;
    double a0, a1, b0, A, B, At1, At2;
    double a2[3], a3[3], b1[3], b2[3], b3[3], Av1[3], Av2[3];
    double C[3], D[3], E[3];
    double summy;
    // start operations
    // Effective temperature parameter
    Tmu = pow((mu * pow(Tirr, 4.0)), (1.0 / 4.0));
    // Find Bond albedo of planet - Bond albedo is given by mu = 1/sqrt(3)
    Teff0 = pow(((pow(Tint, 4.0) + (1.0 / sqrt(((double)3.0))) * pow(Tirr, 4.0))), (1.0 / 4.0));
    Bond_Parmentier(Teff0, grav, Bond);
    Teff = pow((pow(Tint, 4.0) + (((double)1.0) - Bond) * mu * pow(Tirr, 4.0)), (1.0 / 4.0));
    // Find the V band gamma, beta and IR gamma and beta ratios for this profile
    // Passed mu, so make lat = acos(mu) and lon = 0
    gam_Parmentier(Teff, table_num, gam_V, Beta_V, Beta, gam_1, gam_2,
        gam_P, tau_lim);
    for (i = 0; i < 3; i++)
    {
        gam_V[i] = gam_V[i] / mu;
    }
    // Hard work starts here - first calculate all the required coefficents
    At1 = pow(gam_1, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_1));
    At2 = pow(gam_2, 2.0) * log(1.0 + 1.0 / (tau_lim * gam_2));
    for (i = 0; i < 3; i++)
    {
        Av1[i] = pow(gam_1, 2.0) * log(1.0 + gam_V[i] / gam_1);
        Av2[i] = pow(gam_2, 2.0) * log(1.0 + gam_V[i] / gam_2);
    }
    a0 = 1.0 / gam_1 + 1.0 / gam_2;
    a1 = -1.0 / (((double)3.0) * pow(tau_lim, 2.0)) * (gam_P / (1.0 - gam_P) *
        (gam_1 + gam_2 - 2.0) / (gam_1 + gam_2) +
        (gam_1 + gam_2) * tau_lim - (At1 + At2) * pow(tau_lim, 2.0));
    for (i = 0; i < 3; i++)
    {
        a2[i] = pow(tau_lim, 2.0) / (gam_P * pow(gam_V[i], 2.0)) *
            ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) *
            (gam_1 + gam_2) - 3.0 * gam_V[i] * (6.0 * pow(gam_1, 2.0) * pow(gam_2, 2.0) - pow(gam_V[i], 2.0) *
            (pow(gam_1, 2.0) + pow(gam_2, 2.0)))) / (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0));
        a3[i] = -pow(tau_lim, 2.0) * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
            (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)) * (Av2[i] + Av1[i]) /
            (gam_P * pow(gam_V[i], 3.0) * (1.0 - pow(gam_V[i], 2.0) * pow(tau_lim, 2.0)));
        b1[i] = gam_1 * gam_2 * (3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) * (3.0 * pow(gam_2, 2.0) -
            pow(gam_V[i], 2.0)) * pow(tau_lim, 2) / (gam_P * pow(gam_V[i], 2.0) *
            (pow(gam_V[i], 2.0) * pow(tau_lim, 2.0) - 1.0));
        b2[i] = 3.0 * (gam_1 + gam_2) * pow(gam_V[i], 3.0) / ((3.0 * pow(gam_1, 2.0) - pow(gam_V[i], 2.0)) *
            (3.0 * pow(gam_2, 2.0) - pow(gam_V[i], 2.0)));
        b3[i] = (Av2[i] - Av1[i]) / (gam_V[i] * (gam_1 - gam_2));
    }
    b0 = 1.0 / (gam_1 * gam_2 / (gam_1 - gam_2) * (At1 - At2) / 3.0 - pow((gam_1 * gam_2), 2.0) /
        sqrt(3.0 * gam_P) - pow((gam_1 * gam_2), 3.0) /
        ((1.0 - gam_1) * (1.0 - gam_2) * (gam_1 + gam_2)));
    A = 1.0 / ((double)3.0) * (a0 + a1 * b0);
    B = -1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0;
    for (i = 0; i < 3; i++)
    {
        C[i] = -1.0 / ((double)3.0) * (b0 * b1[i] * (1.0 + b2[i] + b3[i]) * a1 + a2[i] + a3[i]);
        D[i] = 1.0 / ((double)3.0) * pow((gam_1 * gam_2), 2.0) / gam_P * b0 * b1[i] * (1.0 + b2[i] + b3[i]);
        E[i] = (3.0 - pow((gam_V[i] / gam_1), 2.0)) * (3.0 - pow((gam_V[i] / gam_2), 2.0)) /
            (9.0 * gam_V[i] * (pow((gam_V[i] * tau_lim), 2.0) - 1.0));
    }
    // T-p structure calculation - we follow exactly V. Parmentier's method
    // Estimate the skin temperature by setting tau = 0
    tau[0] = 0.0;
    summy = 0.0;
    for (i = 0; i < 3; i++)
    {
        summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
            E[i] * exp(-gam_V[i] * tau[0]));
    }
    Tskin = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
    Tskin = pow(Tskin, (1.0 / 4.0));
    // Estimate the opacity TOA at the skin temperature - assume this is = first layer optacity
    k_Ross_Freedman(Tskin, pl[0], met, kRoss[0]);
    // k_Rosseland_mod::k_Ross_Valencia(Tskin, pe[0], met, kRoss[0]);
    // Recalculate the upmost tau with new kappa
    tau[0] = kRoss[0] / grav * pl[0];
    // More accurate layer T at uppermost layer
    summy = 0.0;
    for (i = 0; i < 3; i++)
    {
        summy += 3.0 * Beta_V[i] * pow(Tmu, 4.0) / 4.0 * (C[i] + D[i] * exp(-tau[0] / tau_lim) +
            E[i] * exp(-gam_V[i] * tau[0]));
    }
    Tl[0] = 3.0 * pow(Tint, 4) / 4.0 * (tau[0] + A + B * exp(-tau[0] / tau_lim)) + summy;
    Tl[0] = pow(Tl[0], (1.0 / 4.0));
    // Now we can loop in optical depth space to find the T-p profile
    for (i = 1; i < nlay; i++)
    {
        // Initial guess for layer
        k_Ross_Freedman(Tl[i - 1], sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
        // call k_Rosseland_mod::k_Ross_Valencia(Tl[i-1], sqrt(pl[i-1]*pl[i], met, kRoss[i])
        tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
        summy = 0.0;
        for (j = 0; j < 3; j++)
        {
            // FIX: was "summy = +3.0*..." — dropped bands 0 and 1
            summy += 3.0 * Beta_V[j] * pow(Tmu, 4.0) / 4.0 * (C[j] + D[j] * exp(-tau[i] / tau_lim) +
                E[j] * exp(-gam_V[j] * tau[i]));
        }
        Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
        Tl[i] = pow(Tl[i], (1.0 / 4.0));
        // Convergence loop
        for (j = 0; j < 5; j++)
        {
            k_Ross_Freedman(sqrt(Tl[i - 1] * Tl[i]), sqrt(pl[i - 1] * pl[i]), met, kRoss[i]);
            //call k_Rosseland_mod::k_Ross_Valencia(sqrt(Tl[i-1]*T[i]), sqrt(pl[i-1]*pl[i]), met, kRoss[i])
            tau[i] = tau[i - 1] + kRoss[i] / grav * (pl[i] - pl[i - 1]);
            summy = 0.0;
            for (k = 0; k < 3; k++)
            {
                summy += 3.0 * Beta_V[k] * pow(Tmu, 4.0) / 4.0 * (C[k] + D[k] * exp(-tau[i] / tau_lim) +
                    E[k] * exp(-gam_V[k] * tau[i]));
            }
            Tl[i] = 3.0 * pow(Tint, 4.0) / 4.0 * (tau[i] + A + B * exp(-tau[i] / tau_lim)) + summy;
            Tl[i] = pow(Tl[i], (1.0 / 4.0));
        }
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Subroutine that corrects for adiabatic region following Parmentier & Guillot (2015)
// Adiabatic correction following Parmentier & Guillot (2015): find the
// radiative-convective boundary and replace the profile below it with a
// (clipped) adiabat. Tl is modified in place; prc receives the boundary
// pressure (left untouched if no layer meets the 0.98 criterion);
// gradrad/gradad receive the local log-log and adiabatic gradients.
void adiabat_correction(int nlay, double* (&Tl),
    double* pl, double& prc, double* gradrad, double* gradad) {
    // local d(lnT)/d(lnP) and the adiabatic gradient for every layer
    for (int lev = 0; lev < nlay - 1; lev++)
    {
        gradrad[lev] = (log10(Tl[lev]) - log10(Tl[lev + 1])) / (log10(pl[lev]) - log10(pl[lev + 1]));
        gradad[lev] = ((double)0.32) - ((double)0.10) * Tl[lev] / ((double)3000.0);
    }
    gradrad[nlay - 1] = 0.0;
    gradad[nlay - 1] = 0.0;
    // scan upward from the bottom for the radiative-convective boundary,
    // only extending it while the candidate region stays contiguous
    int iRC = nlay - 2;
    int iRC1 = nlay - 2;
    for (int lev = nlay - 2; lev >= 0; lev--)
    {
        if (iRC1 <= lev + 1)
        {
            if (gradrad[lev] > ((double)0.7) * gradad[lev])
            {
                iRC1 = lev;                 // tentative boundary (70% criterion)
            }
            if (gradrad[lev] > ((double)0.98) * gradad[lev])
            {
                iRC = lev;                  // confirmed boundary (98% criterion)
                prc = pl[iRC];
            }
        }
    }
    // below the boundary, march T down the (non-negative) adiabat
    if (iRC < nlay)
    {
        for (int lev = iRC; lev < nlay - 1; lev++)
        {
            gradad[lev] = (double)0.32 - ((double)0.10) * Tl[lev] / ((double)3000.0);
            if (gradad[lev] < 0.0)
            {
                gradad[lev] = 0.0;
            }
            Tl[lev + 1] = Tl[lev] * pow((pl[lev + 1] / pl[lev]), gradad[lev]);
        }
    }
}
///////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////
// Dispatch initial-condition generation for the T-p profile.
// Only iIC == 4 (Parmentier & Guillot non-grey picket-fence) is
// implemented; any other selector leaves Tl untouched (the original
// Fortran stopped with an error here — see the retained message below).
// When corr is set, the radiative profile is afterwards relaxed onto an
// adiabat and prc receives the radiative-convective boundary pressure;
// otherwise prc falls back to the reference pressure p0.
// (k_V, k_IR, fl, pe and tau_IRl_hf_l are unused by the currently active
// path but kept for call-site compatibility.)
void IC_profile(int iIC, bool corr, int nlay, double p0,
double* pl, double* pe,
double* k_V, double* k_IR,
double Tint, double mu, double Tirr, double grav, double fl,
double* Tl, double& prc, int table_num, double met,
double* tau_hf_e, double* kRoss_hf_e, double* tau_IRl_hf_l,
double* gradrad_hf_l, double* gradad_hf_l) {
    if (iIC == 4)
    {
        Parmentier_IC(nlay, pl, pe, Tint, mu, Tirr, grav, Tl, table_num, met,
            tau_hf_e, kRoss_hf_e);
    }
    //else: "Invalid IC integer in IC_mod, stopping" — intentionally silent here
    if (corr)
    {
        adiabat_correction(nlay, Tl, pl, prc, gradrad_hf_l, gradad_hf_l);
    }
    else
    {
        prc = p0;
    }
}
#endif // IC_mod |
9,695 | #include "calculate.cuh"
// Increment one array element per thread (element index = global thread id).
// NOTE(review): there is no `idx < n` bounds guard and no size parameter,
// so the caller must guarantee gridDim.x * blockDim.x does not exceed the
// allocation — the current launch in callCudaFunc does not guarantee this.
__global__ void gpuCalculate(int* d_arr) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d_arr[idx]++;
}
// Copy carr[0..n) to the device, increment every element on the GPU with
// gpuCalculate, and copy the results back into carr.
// FIX: the original unconditionally launched 100000 blocks x 1000 threads,
// so the guard-less kernel wrote ~1e8 ints regardless of n (heap overflow
// for any realistic n), and the device buffer was never freed. The grid is
// now sized from n, and the allocation is padded to a whole number of
// blocks so the kernel's tail threads still write memory we own (the
// padding is never copied back).
void callCudaFunc(int* carr, int n) {
    if (n <= 0) return;                               // nothing to do
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // ceil-div
    const size_t padded = (size_t)blocks * threads;   // covers the grid tail
    int* d_arr = 0;
    cudaMalloc((void**)&d_arr, padded * sizeof(int));
    cudaMemcpy(d_arr, carr, n * sizeof(int), cudaMemcpyHostToDevice);
    gpuCalculate<<<blocks, threads>>>(d_arr);
    cudaMemcpy(carr, d_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_arr);
}
|
9,696 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
// On-disk BMP file header. Fields are fwritten individually in
// write_bitmap, so host struct padding never reaches the file.
// NOTE(review): bfReserved packs the format's two 16-bit reserved fields
// into a single int — fine, since it is always written as 4 zero bytes.
struct BITMAPFILEHEADER
{
char bfType[2];
int bfSize;
int bfReserved;
int bfOffBits;
};
// On-disk BMP info header (the 40-byte BITMAPINFOHEADER variant).
// As above, fields are written one by one so layout/padding is irrelevant.
struct BITMAPINFOHEADER {
int biSize;
int biWidth;
int biHeight;
short biPlanes;
short biBitCount;
int biCompression;
int biSizeImage;
int biXPelsPerMeter;
int biYPelsPerMeter;
int biClrUsed;
int biClrImportant;
};
// Write a 24-bit uncompressed BMP from three separate colour planes
// (top row of the planes = top of the image; BMP stores rows bottom-up,
// hence the reverse row loop). Returns 0 on success, -1 if the file
// cannot be opened.
// Each output row is padded to a multiple of 4 bytes, as BMP requires:
// (3*(width+1)/4)*4 == 3*width rounded up to the next multiple of 4.
int write_bitmap(const char *filename, int width, int height, char *red, char *green, char *blue)
{
int bytes_per_line = (3 * (width + 1) / 4) * 4;
unsigned char *image_line = (unsigned char *)malloc(bytes_per_line);
struct BITMAPFILEHEADER bmph;
bmph.bfType[0] = 'B';
bmph.bfType[1] = 'M';
bmph.bfReserved = 0;
bmph.bfOffBits = 54;   // 14-byte file header + 40-byte info header
bmph.bfSize = bmph.bfOffBits + bytes_per_line * height;
struct BITMAPINFOHEADER bmih;
bmih.biSize = 40;
bmih.biWidth = width;
bmih.biHeight = height;
bmih.biPlanes = 1;
bmih.biBitCount = 24;
bmih.biCompression = 0;   // BI_RGB, uncompressed
bmih.biSizeImage = bytes_per_line * height;
bmih.biXPelsPerMeter = 0;
bmih.biYPelsPerMeter = 0;
bmih.biClrUsed = 0;
bmih.biClrImportant = 0;
FILE *fit;
if((fit = fopen (filename, "wb"))==0) {
free(image_line);
return -1;
}
// headers are written field-by-field to get the exact on-disk layout
fwrite(&bmph.bfType, 2, 1, fit);
fwrite(&bmph.bfSize, 4, 1, fit);
fwrite(&bmph.bfReserved, 4, 1, fit);
fwrite(&bmph.bfOffBits, 4, 1, fit);
fwrite(&bmih.biSize, 4, 1, fit);
fwrite(&bmih.biWidth, 4, 1, fit);
fwrite(&bmih.biHeight, 4, 1, fit);
fwrite(&bmih.biPlanes, 2, 1, fit);
fwrite(&bmih.biBitCount, 2, 1, fit);
fwrite(&bmih.biCompression, 4, 1, fit);
fwrite(&bmih.biSizeImage, 4, 1, fit);
fwrite(&bmih.biXPelsPerMeter, 4, 1, fit);
fwrite(&bmih.biYPelsPerMeter, 4, 1, fit);
fwrite(&bmih.biClrUsed, 4, 1, fit);
fwrite(&bmih.biClrImportant, 4, 1, fit);
// rows bottom-up, pixels as B,G,R triples
for(int i=height-1;i>=0;i--) {
for (int j=0;j<width;j++) {
int pos = (width * i + j);
image_line[3*j] = blue[pos];
image_line[3*j+1] = green[pos];
image_line[3*j+2] = red[pos];
}
fwrite((void *)image_line, bytes_per_line, 1, fit);
}
free(image_line);
fclose(fit);
return 0;
}
// CPU reference Mandelbrot renderer: one greyscale escape-time value per
// pixel, written identically into the red, green and blue planes.
// Pixels map to x0 in [-2.5, 1) and y0 in [-1, 1); points that never escape
// within 256 iterations are coloured 0 (black).
void mandel_host(char *red, char *green, char *blue, int width, int height)
{
    const int max_iteration = 256;
    for (int pos_y = 0; pos_y < height; pos_y++) {
        for (int pos_x = 0; pos_x < width; pos_x++) {
            float x0 = ((float)pos_x)*3.5/((float)width)-2.5;
            float y0 = ((float)pos_y)*2.0/((float)height)-1.0;
            float zx = 0.0;
            float zy = 0.0;
            int it = 0;
            // standard escape-time loop: z <- z^2 + c until |z|^2 > 4
            while (zx*zx + zy*zy <= 4 && it < max_iteration) {
                float tmp = zx*zx - zy*zy + x0;
                zy = 2*zx*zy + y0;
                zx = tmp;
                it++;
            }
            if (it == max_iteration) {
                it = 0;                 // interior of the set: black
            }
            int index = width*pos_y + pos_x;
            red[index] = it;
            green[index] = it;
            blue[index] = it;
        }
    }
}
// GPU Mandelbrot kernel: each thread renders one pixel, using the same
// escape-time colouring as mandel_host (thread coordinates come from the
// 2-D grid: x = threadIdx.x + blockDim.x*blockIdx.x, likewise for y).
// FIX: added a bounds guard. The kernel previously assumed the grid exactly
// tiled the image; a rounded-up grid (or any width/height not matching the
// launch) made tail threads index out of bounds.
__global__ void mandel_cuda(char *red, char *green, char *blue, int width, int height)
{
    int pos_x = threadIdx.x + blockDim.x * blockIdx.x;
    int pos_y = threadIdx.y + blockDim.y * blockIdx.y;
    if (pos_x >= width || pos_y >= height) return;   // guard the grid tail
    float x0 = ((float)pos_x)*3.5/((float)width)-2.5;
    float y0 = ((float)pos_y)*2.0/((float)height)-1.0;
    float x = 0.0;
    float y = 0.0;
    int iteration = 0;
    int max_iteration = 256;
    // escape-time loop: z <- z^2 + c until |z|^2 > 4 or the cap is hit
    while (x*x + y*y <= 4 && iteration < max_iteration) {
        float xtemp = x*x - y*y + x0;
        y = 2*x*y + y0;
        x = xtemp;
        iteration++;
    }
    int index = width*pos_y + pos_x;
    if (iteration == max_iteration) {
        iteration = 0;                // interior of the set: black
    }
    red[index] = iteration;
    green[index] = iteration;
    blue[index] = iteration;
}
// Render the Mandelbrot image on the CPU and write it to output_host.bmp.
void fes_host(int width, int height)
{
    const size_t plane_bytes = sizeof(char) * width * height;
    char *r = (char *)malloc(plane_bytes);
    char *g = (char *)malloc(plane_bytes);
    char *b = (char *)malloc(plane_bytes);
    mandel_host(r, g, b, width, height);
    // Now write the file
    write_bitmap("output_host.bmp", width, height, r, g, b);
    free(r);
    free(g);
    free(b);
}
// Render the Mandelbrot image on the GPU, copy it back and write
// output_cuda.bmp, timing the device path with clock().
// NOTE(review): gridDim uses integer division (width / 6), so for widths
// not divisible by 6 (e.g. 5120) the rightmost columns and bottom rows are
// never computed — the uninitialised device bytes are copied back and will
// differ from the CPU image. Round the grid up and guard in the kernel.
void fes_cuda(int width, int height)
{
size_t buffer_size = sizeof(char) * width * height;
char *image_red;
char *image_green;
char *image_blue;
cudaMalloc((void**)&image_red, buffer_size);
cudaMalloc((void**)&image_green, buffer_size);
cudaMalloc((void**)&image_blue, buffer_size);
clock_t t_device = clock();
dim3 blockDim(6, 6,1);
dim3 gridDim(width / blockDim.x, height / blockDim.y,1);
mandel_cuda<<< gridDim, blockDim,0>>>(image_red, image_green, image_blue, width, height);
char *host_image_red = (char*)malloc(buffer_size);
char *host_image_green= (char*)malloc(buffer_size);
char *host_image_blue= (char*)malloc(buffer_size);
/* copy the image values back to the host
   (the synchronous cudaMemcpy also waits for the kernel to finish) */
cudaMemcpy(host_image_red,image_red,buffer_size,cudaMemcpyDeviceToHost);
cudaMemcpy(host_image_green,image_green,buffer_size,cudaMemcpyDeviceToHost);
cudaMemcpy(host_image_blue,image_blue,buffer_size,cudaMemcpyDeviceToHost);
// Now write the file
write_bitmap("output_cuda.bmp", width, height, host_image_red,
host_image_green, host_image_blue);
t_device = clock() - t_device;
double time_taken_device = ((double)t_device)/CLOCKS_PER_SEC;
printf("GPU %f segons with %d threats \n", time_taken_device,blockDim.x);
/* release device and host memory */
cudaFree(image_blue);
cudaFree(image_green);
cudaFree(image_red);
free(host_image_blue);
free(host_image_green);
free(host_image_red);
}
// Read a 24-bit BMP and return its pixel bytes with each pixel's B and R
// swapped (BGR on disk -> RGB in memory). The caller owns the returned
// buffer (delete[]). Returns NULL if the file cannot be opened or is
// shorter than the header claims.
// FIX: the original dereferenced a NULL FILE* when the file was missing
// and ignored both freads' return values; both failure modes are checked.
// NOTE(review): rows are read without BMP's 4-byte row padding — correct
// only when 3*width is already a multiple of 4 (true for the 5120-wide
// images produced here); confirm before reusing for arbitrary widths.
unsigned char* readBMP(const char* filename)
{
    FILE* f = fopen(filename, "rb");
    if (!f) return NULL;
    unsigned char info[54];
    if (fread(info, sizeof(unsigned char), 54, f) != 54) {   // 54-byte header
        fclose(f);
        return NULL;
    }
    // image dimensions sit at fixed offsets inside the header
    int width = *(int*)&info[18];
    int height = *(int*)&info[22];
    int size = 3 * width * height;
    unsigned char* data = new unsigned char[size]; // 3 bytes per pixel
    size_t got = fread(data, sizeof(unsigned char), size, f);
    fclose(f);
    if ((int)got != size) {
        delete[] data;
        return NULL;
    }
    // swap B and R in every pixel triple
    for (int i = 0; i < size; i += 3)
    {
        unsigned char tmp = data[i];
        data[i] = data[i + 2];
        data[i + 2] = tmp;
    }
    return data;
}
// Render the image with CUDA and on the CPU, then compare the two BMPs
// byte-wise and report the number of mismatching bytes.
// FIX: the mismatch branch previously printed "There are no difference";
// it now reports that differences were found. The readBMP buffers are
// also released instead of leaking.
int main(int argc, const char * argv[]) {
    fes_cuda(5120, 5120);
    fes_host(5120, 5120);
    unsigned char *c = readBMP("output_cuda.bmp");
    unsigned char *h = readBMP("output_host.bmp");
    int errors = 0;
    const int lengc = 5120 * 5120;
    for (int i = 0; i < lengc; ++i) {
        if (c[i] != h[i]) {
            errors++;
        }
    }
    if (errors) printf("There are differences, have %d errors\n", errors);
    else printf("There are no difference\n");
    delete[] c;
    delete[] h;
    return 0;
}
|
9,697 | #include "includes.h"
// Bilinear (TensorFlow-style) resize forward pass for NCHW batches.
// The per-output-pixel source coordinates are precomputed on the host:
// yLowIdx/yHighIdx and xLowIdx/xHighIdx hold the two neighbouring input
// rows/columns for each output row/column, and yInter/xInter the
// corresponding interpolation weights in [0,1].
// Launch mapping: blockIdx.z*blockDim.z+threadIdx.z = batch item,
// blockIdx.x strides over channels, threadIdx.y/x stride over output
// rows/columns. No bounds guards: the launch must tile batchSize exactly.
__global__ void cudaSBilinearTF_Forward_kernel( unsigned int outputWidth, unsigned int outputHeight, unsigned int nbChannels, unsigned int batchSize, unsigned int inputWidth, unsigned int inputHeight, const unsigned int* yLowIdx, const unsigned int* yHighIdx, const float* yInter, const unsigned int* xLowIdx, const unsigned int* xHighIdx, const float* xInter, const float* input, float* outputs)
{
// per-batch-item base offsets into the flattened NCHW input and output
const unsigned int inputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*inputWidth*inputHeight;
const unsigned int outputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*outputWidth*outputHeight;
for (unsigned int ch = blockIdx.x; ch < nbChannels; ch += gridDim.x)
{
for (unsigned int oy = threadIdx.y; oy < outputHeight; oy += blockDim.y)
{
for (unsigned int ox = threadIdx.x; ox < outputWidth; ox += blockDim.x)
{
// gather the four neighbouring input pixels (TL, TR, BL, BR)
const unsigned int indexTL = xLowIdx[ox] + yLowIdx[oy]*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset;
const unsigned int indexTR = xHighIdx[ox] + yLowIdx[oy]*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset;
const unsigned int indexBL = xLowIdx[ox] + yHighIdx[oy]*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset;
const unsigned int indexBR = xHighIdx[ox] + yHighIdx[oy]*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset;
const float top_left = input[indexTL];
const float top_right = input[indexTR];
const float bottom_left = input[indexBL];
const float bottom_right = input[indexBR];
// interpolate horizontally along both rows, then vertically
const float top = top_left + (top_right - top_left) * xInter[ox];
const float bottom = bottom_left + (bottom_right - bottom_left) * xInter[ox];
outputs[ ox + oy*outputWidth
+ ch*outputWidth*outputHeight + outputOffset] = top + (bottom - top) * yInter[oy];
}
}
}
}
9,698 | #include <math.h>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <random>
#ifndef N
#define N 4
#endif
#ifndef TEST
#define TEST 0
#endif
#ifndef TPB
#define TPB 1024
#endif
using namespace std;
// Deterministic pseudo-random value in [-2, 2), narrowed to float.
// Fixed seed (42) so every run produces the same sequence.
float randomFloat() {
    static std::mt19937 engine(42);
    static std::uniform_real_distribution<double> range(-2, 2);
    return range(engine);
}
// Naive dense N x N matrix multiply: C = A * B (row-major).
// One thread per output element; launch with at least N*N threads total,
// extra threads exit at the bounds guard.
__global__ void multiply(float* A, float* B, float* C) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N * N) return;
    int i = idx / N, j = idx % N;
    // BUG FIX: the original did `C[i*N+j] += ...` into device memory that
    // the caller never initializes (cudaMalloc leaves it undefined and the
    // host-side zeroed C is never copied up), so results contained garbage.
    // Accumulate in a register and overwrite the output instead.
    float sum = 0.0f;
    for (int k = 0; k < N; k++) sum += A[i * N + k] * B[k * N + j];
    C[i * N + j] = sum;
}
int main() {
    // Host driver: fill A and B with seeded random values, multiply on the
    // GPU, and print the elapsed time (H2D copies + kernel + D2H copy).
    typedef std::chrono::high_resolution_clock Time;
    typedef std::chrono::duration<float> fsec;
    static float A[N * N], B[N * N], C[N * N] = {0};

    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            A[i * N + j] = randomFloat();
            B[i * N + j] = randomFloat();
        }

    float *A_dev, *B_dev, *C_dev;
    const int mat_size = N * N * sizeof(float);
    // Integer ceil-division; avoids the float round-trip through ceil().
    const int num_blocks = (N * N + TPB - 1) / TPB;
    cudaMalloc((void**)&A_dev, mat_size);
    cudaMalloc((void**)&B_dev, mat_size);
    cudaMalloc((void**)&C_dev, mat_size);

    auto start = Time::now();
    cudaMemcpy(A_dev, A, mat_size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_dev, B, mat_size, cudaMemcpyHostToDevice);
    // BUG FIX: cudaMalloc does not zero memory and the kernel accumulates
    // with +=, so C_dev must be cleared before the launch.
    cudaMemset(C_dev, 0, mat_size);
    multiply<<<num_blocks, TPB>>>(A_dev, B_dev, C_dev);
    cudaError_t err = cudaGetLastError(); // catch launch-configuration errors
    if (err != cudaSuccess)
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;
    cudaMemcpy(C, C_dev, mat_size, cudaMemcpyDeviceToHost); // blocks until done
    auto stop = Time::now();
    fsec timer1 = stop - start;

#if TEST
    cout << C[N*N-1] << endl;
#endif
    cout << timer1.count() * 1000 << " ms" << endl;

    cudaFree(A_dev); // release device memory (the original leaked all three)
    cudaFree(B_dev);
    cudaFree(C_dev);
    return 0;
}
|
9,699 | #include "includes.h"
// Fills an nelems^3 regular grid of 3D points, where each axis coordinate is
// minimum + index * step, and for each point a direction vector from the
// fixed reference (10, 10, 10) to that point. Both arrays hold interleaved
// xyz triples. gridDim.x packs the k (z-slab) index times nBBS0 blocks per
// slab; j comes from the y launch dimensions.
__global__ void pointGenKernel(float* points, float* dirs, int nBBS0, int nelems, float minimum, float step) {
    const int k = blockIdx.x / nBBS0;                                   // z index (slab)
    const int i = (blockIdx.x - k * nBBS0) * blockDim.x + threadIdx.x;  // x index within slab
    const int j = blockDim.y * blockIdx.y + threadIdx.y;                // y index

    if (i >= nelems || j >= nelems || k >= nelems) return;

    const float px = minimum + i * step;
    const float py = minimum + j * step;
    const float pz = minimum + k * step;

    const int base = 3 * (i + nelems * (j + nelems * k));
    points[base]     = px;
    points[base + 1] = py;
    points[base + 2] = pz;
    dirs[base]     = px - 10.f;
    dirs[base + 1] = py - 10.f;
    dirs[base + 2] = pz - 10.f;
}
// Forward sigmoid activation: dst[i] = 1 / (1 + e^(-src[i])).
// One thread per element; launch with at least `elements` threads.
// Generic version promotes the input to float before exponentiating.
template<typename Destination, typename Data>
__global__ void sigmoidActivationForward(size_t elements, Destination *dst, Data *src) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elements) return;
    dst[idx] = 1 / (1 + exp((float)-src[idx]));
}

// float specialization: no cast needed, exp resolves to the float overload.
template<>
__global__ void sigmoidActivationForward(size_t elements, float *dst, float *src) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elements) return;
    dst[idx] = 1 / (1 + exp(-src[idx]));
}

// double specialization: evaluated entirely in double precision.
template<>
__global__ void sigmoidActivationForward(size_t elements, double *dst, double *src) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elements) return;
    dst[idx] = 1 / (1 + exp(-src[idx]));
}
// Element-wise dst[i] = src[i] * (1 - src[i]) — the sigmoid derivative
// expressed in terms of the forward output (presumably src holds the
// result of sigmoidActivationForward; verify at the call site).
template<typename Destination, typename Data>
__global__ void sigmoidActivationBackward(size_t elements, Destination *dst, Data *src) {
    const size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elements) return;
    const Data s = src[idx];
    dst[idx] = s * (1 - s);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.